"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : Tuple = psutil.Process()
_lowerCamelCase : Optional[int] = False
def A_ ( self ):
_lowerCamelCase : Tuple = -1
while True:
_lowerCamelCase : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def A_ ( self ):
_lowerCamelCase : List[str] = True
_lowerCamelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
_lowerCamelCase : Optional[Any] = True
self.thread.start()
def A_ ( self ):
_lowerCamelCase : Optional[Any] = False
self.thread.join()
return self.cpu_memory_peak
lowercase__ = PeakCPUMemory()
def _snake_case ( ):
# Time
_lowerCamelCase : Dict = {'time': time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCamelCase : str = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCamelCase : List[Any] = torch.cuda.memory_allocated(lowercase__ )
torch.cuda.reset_peak_memory_stats()
return measures
def _snake_case ( lowercase__ ):
# Time
_lowerCamelCase : Any = {'time': time.time() - start_measures['time']}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_lowerCamelCase : Dict = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
_lowerCamelCase : str = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_lowerCamelCase : List[Any] = (torch.cuda.memory_allocated(lowercase__ ) - start_measures[str(lowercase__ )]) / 2**20
_lowerCamelCase : Union[str, Any] = (torch.cuda.max_memory_allocated(lowercase__ ) - start_measures[str(lowercase__ )]) / 2**20
return measures
def _snake_case ( lowercase__ , lowercase__ ):
print(f'''{description}:''' )
print(f'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(f'''- GPU {i} allocated: {measures[str(lowercase__ )]:.2f}MiB''' )
_lowerCamelCase : Any = measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
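# --- Usage sketch (added for illustration; not part of the original file) ---
# Wrap the workload between start_measure() and end_measure(), then print the
# deltas with log_measures(). Assumes a CUDA machine and that the module above
# is importable; the module name `benchmark_utils` is hypothetical.
import torch

from benchmark_utils import end_measure, log_measures, start_measure

start = start_measure()
layer = torch.nn.Linear(1024, 1024)
out = layer(torch.randn(64, 1024))  # the code being measured
log_measures(end_measure(start), "linear forward")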
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : List[Any] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
_lowerCamelCase : Dict = time()
xla_spawn.main()
_lowerCamelCase : Any = time()
_lowerCamelCase : Optional[int] = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A_ ( self ):
import xla_spawn
_lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main()
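# --- Usage note (added for illustration; not part of the original test file) ---
# Patching sys.argv above mimics a command-line launch. Outside the test suite,
# the same run would look roughly like the shell invocation below (paths and
# flags assumed, not taken from the original file):
#
#   python xla_spawn.py --num_cores=8 \
#       ./examples/pytorch/text-classification/run_glue.py \
#       --model_name_or_path distilbert-base-uncased \
#       --do_train --do_eval --max_steps=10 ...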
"""PyTorch ResNet model."""

from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet's residual layer composed by two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet's bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction`
    in order to make the `3x3` convolution faster; the last `1x1` convolution remaps the features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """
    A ResNet stage composed by stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
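# --- Usage sketch (added for illustration; not part of the modeling file) ---
# Classifying an image with the pretrained checkpoint referenced above. Only
# public transformers APIs are used; the image path is hypothetical.
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])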
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculation of the Easter date for a given year, using Gauss's Easter algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
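# --- Worked check (added for illustration) ---
# For year = 2023: metonic_cycle = 9, julian_leap_year = 3, non_leap_year = 0,
# leap_day_inhibits = 20, lunar_orbit_correction = 6, leap_day_reinstall_number = 5.0,
# so secular_moon_shift = 24.0, century_starting_point = 5.0, days_to_add = 15.0 and
# days_from_phm_to_sunday = 3.0: March 21 + 1 + 18 days gives April 9, the correct date.
assert gauss_easter(2023) == datetime(2023, 4, 9)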
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
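# --- Usage sketch (added for illustration; not part of the generated table) ---
# A table like this is typically consumed by a small helper that turns package
# names back into pinned requirement strings, e.g. when building setup.py
# extras. The helper below is a sketch of that pattern, not the library's own code.
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

# deps_list("torch", "transformers") -> ["torch>=1.4", "transformers>=4.25.1"]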
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
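# --- Usage sketch (added for illustration; not part of the test file) ---
# Attaching a FAISS index to a Dataset outside the test harness; the column
# names and vectors below are made up.
import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "emb": [[float(i)] * 5 for i in range(3)]})
ds.add_faiss_index(column="emb")
scores, retrieved = ds.get_nearest_examples("emb", np.ones(5, dtype=np.float32), k=1)
print(retrieved["text"])  # nearest neighbour of the all-ones query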
"""Bit Diffusion pipeline: diffusion over binary (bit) representations of images."""
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # swap in the clamped-to-bit-scale step function matching the scheduler type
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
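# --- Usage sketch (added for illustration; not part of the pipeline file) ---
# Sampling requires a UNet trained on bit representations (the channel count is
# 8x the usual, one channel per bit); the checkpoint path below is hypothetical.
from diffusers import DDIMScheduler, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical checkpoint
scheduler = DDIMScheduler(num_train_timesteps=1000)
pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
images = pipe(height=256, width=256, num_inference_steps=50).images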
"""Testing suite for the PyTorch BiT model."""
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
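# --- Usage sketch (added for illustration; not part of the test file) ---
# The integration test above corresponds to this minimal inference flow. The
# checkpoint name matches BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] in the modeling
# file ("google/bit-50", to the best of our knowledge).
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")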
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
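# --- Usage sketch (added for illustration; not part of the utility file) ---
# A typical call site inside a function that renamed a keyword argument; the
# surrounding function and argument names below are made up.
def resize(image, size=None, **kwargs):
    # pull the old `shape` kwarg if the caller still passes it, emit a
    # FutureWarning, and fall back to the new `size` argument
    shape = deprecate("shape", "1.0.0", "Use `size` instead.", take_from=kwargs)
    if shape is not None:
        size = shape
    return size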
"""MGP-STR model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an MGP-STR model.
    """

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
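# --- Usage sketch (added for illustration; not part of the configuration file) ---
# Instantiating the configuration with defaults, then with one override:
config = MgpstrConfig()
small = MgpstrConfig(num_hidden_layers=6)
print(config.image_size, small.num_hidden_layers)  # [32, 128] 6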
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
def __get__( self: str , __A: str , __A: Tuple=None ) -> List[str]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
_A = '''__cached_''' + self.fget.__name__
_A = getattr(__A , __A , __A )
if cached is None:
_A = self.fget(__A )
setattr(__A , __A , __A )
return cached
def __A ( _lowercase ):
'''simple docstring'''
_A = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def __A ( _lowercase ):
'''simple docstring'''
if is_torch_fx_proxy(_lowercase ):
return True
if is_torch_available():
import torch
if isinstance(_lowercase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowercase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowercase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowercase , np.ndarray )
def __A ( _lowercase ):
'''simple docstring'''
return isinstance(_lowercase , np.ndarray )
def __A ( _lowercase ):
'''simple docstring'''
return _is_numpy(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import torch
return isinstance(_lowercase , torch.Tensor )
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import torch
return isinstance(_lowercase , torch.device )
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import torch
if isinstance(_lowercase , _lowercase ):
if hasattr(_lowercase , _lowercase ):
_A = getattr(_lowercase , _lowercase )
else:
return False
return isinstance(_lowercase , torch.dtype )
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import tensorflow as tf
return isinstance(_lowercase , tf.Tensor )
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowercase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(_lowercase )
return type(_lowercase ) == tf.Tensor
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(_lowercase , jnp.ndarray )
def __A ( _lowercase ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(_lowercase )
def to_py_obj( obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
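# Illustrative example (not part of the original file): to_py_obj recurses through
# containers, so to_py_obj({"a": np.array([1, 2])}) == {"a": [1, 2]}.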
def to_numpy( obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput(OrderedDict ):
    """simple docstring"""
    def __post_init__( self ):
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        # Will raise a KeyException if needed
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
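# Usage sketch (hypothetical subclass, not part of the original file): a dataclass that
# inherits from the ModelOutput class above supports attribute and key access alike, e.g.
#   @dataclass
#   class SampleOutput(ModelOutput):
#       loss: Optional[float] = None
#       logits: Optional[list] = None
#   SampleOutput(logits=[1.0]).to_tuple() == ([1.0],)  # fields that are None are dropped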
class ExplicitEnum(str , Enum ):
    """simple docstring"""
    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class PaddingStrategy(ExplicitEnum ):
    """simple docstring"""
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum ):
    """simple docstring"""
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """simple docstring"""
    def __init__( self , context_managers: List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
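# Usage sketch (illustrative; file names are hypothetical): a dynamic list of context
# managers is entered and exited as one unit via the wrapped ExitStack:
#   with ContextManagers([open("a.txt"), open("b.txt")]):
#       ...  # both files are open here and are closed together on exit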
def can_return_loss( model_class ):
    '''simple docstring'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ):
    '''simple docstring'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d , parent_key = "" , delimiter = "." ):
    '''simple docstring'''
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , dict ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
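# Illustrative example (not part of the original file):
# flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}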
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ):
    '''simple docstring'''
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array )}.""" )
def reshape( array , newshape ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array )}.""" )
def squeeze( array , axis=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims( array , axis ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array )}.""" )
def tensor_size( array ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"""Type not supported for tensor_size: {type(array )}.""" )
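# Illustrative check (not part of the original file): these helpers dispatch on the
# tensor type so one call works across frameworks, e.g.
# transpose(np.ones((2, 3))).shape == (3, 2) and tensor_size(np.ones((2, 3))) == 6.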
def add_model_info_to_auto_map( repo_id , auto_map ):
    '''simple docstring'''
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and '''--''' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
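# Illustrative example (repo id is hypothetical):
# add_model_info_to_auto_map("user/repo", {"AutoModel": "modeling.MyModel"})
# returns {"AutoModel": "user/repo--modeling.MyModel"}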
def infer_framework( model_class ):
    '''simple docstring'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('''torch''' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"""Could not infer framework from class {model_class}.""" )
| 360 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC ):
    """simple docstring"""
    def __init__( self , path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None , split: Optional[NamedSplit] = None , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths , dict ) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC ):
    """simple docstring"""
    def __init__( self , features: Optional[Features] = None , cache_dir: str = None , keep_in_memory: bool = False , streaming: bool = False , num_proc: Optional[int] = None , **kwargs , ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, IterableDataset]:
        pass
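# Subclass sketch (hypothetical, not part of the original file): a concrete reader
# implements read(), e.g.
#   class CsvDatasetReader(AbstractDatasetReader):
#       def read(self):
#           ...  # build and return a Dataset from self.path_or_paths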
| 75 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''
    def __init__( self , model=None , **kwargs):
        """simple docstring"""
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""" , None)
        self.latest_model_name = kwargs.get("""latest_model_name""" , ONNX_WEIGHTS_NAME)
    def __call__( self , **kwargs):
        """simple docstring"""
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None , inputs)
    @staticmethod
    def load_model( path , provider=None , sess_options=None):
        """simple docstring"""
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options)
    def _save_pretrained( self , save_directory , file_name = None , **kwargs):
        """simple docstring"""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path , dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path , dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs , ):
        """simple docstring"""
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory , exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs)
    @classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ):
        """simple docstring"""
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name) , provider=provider , sess_options=sess_options)
            kwargs["""model_save_dir"""] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["""model_save_dir"""] = Path(model_cache_path).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options)
        return cls(model=model , **kwargs)
    @classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ):
        """simple docstring"""
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
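# Usage sketch (illustrative; the model id is hypothetical): load an exported ONNX model
# from a local directory or the Hub, then call it with numpy-convertible keyword inputs:
#   model = OnnxRuntimeModel.from_pretrained("some-user/some-onnx-model")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))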
| 272 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
os.environ["""AWS_DEFAULT_REGION"""] = """us-east-1"""  # default region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
    @property
    def metric_definitions(self ):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self ):
        return f"{self.framework}-transformers-test"
    @property
    def test_path(self ):
        return f"./tests/sagemaker/scripts/{self.framework}"
    @property
    def image_uri(self ):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 86 | 0 |
'''simple docstring'''
def solution( n: int = 100 ) -> int:
    '''Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first n natural numbers.'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
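# Quick sanity check (illustrative): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so solution(10) returns 2640.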
if __name__ == "__main__":
print(f"{solution() = }")
| 106 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    def _parse_labels(self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
    @property
    def entailment_id(self ):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail" ):
                return ind
        return -1
    def _parse_and_tokenize(self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self , **kwargs ):
        if kwargs.get("multi_class" , None ) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self , sequences: Union[str, List[str]] , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"""Unable to understand extra arguments {args}""" )
        return super().__call__(sequences , **kwargs )
    def preprocess(self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs, sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward(self , inputs ):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
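# Usage sketch (illustrative; assumes an NLI checkpoint such as facebook/bart-large-mnli):
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this movie", candidate_labels=["positive", "negative"])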
| 106 | 1 |
import qiskit
def quantum_entanglement( qubits: int = 2 ):
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 , qubits ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) , list(range(classical_bits ) ) )
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    return job.result().get_counts(circuit )
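# Illustrative note: measuring an n-qubit GHZ state ideally yields only the all-zeros
# and all-ones bitstrings, e.g. for 3 qubits roughly {"000": ~500, "111": ~500} over
# 1000 shots.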
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
| 68 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """nielsr/canine-s""": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1_114_112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size(self ):
        return self._unicode_vocab_size
    def _tokenize(self , text ):
        return list(text )
    def _convert_token_to_id(self , token ):
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )
    def _convert_id_to_token(self , index ):
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )
    def convert_tokens_to_string(self , tokens ):
        return "".join(tokens )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        return ()
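# Illustrative example (not part of the original file): CANINE tokenizes directly at the
# character level, so _convert_token_to_id("a") == ord("a") == 97, and the [CLS]
# pseudo-character decodes via SPECIAL_CODEPOINTS[0xE000] == "[CLS]".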
| 285 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps'''):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''.''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 1.0,
            '''sag_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_inference_batch_single_identical(self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase ):
    '''simple docstring'''
    def tearDown(self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self ):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self ):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_sag_non_square(self ):
        '''simple docstring'''
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)
        prompt = '''.'''
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
| 356 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 231 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits=BITS ):
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , 'b c d h w -> b (c d) h w' )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits=BITS ):
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
    return (dec / 255).clamp(0.0 , 1.0 )
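# Illustrative round-trip check (not part of the original file): for an image tensor
# x in [0, 1], bits_to_decimal(decimal_to_bits(x)) recovers x up to the 8-bit
# quantization, i.e. with max abs error below 1/255.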
def ddim_bit_scheduler_step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = True , generator=None , return_dict: bool = True , ):
    if self.num_inference_steps is None:
        raise ValueError(
            'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0" to the bit scale
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else 'cpu'
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def ddpm_bit_scheduler_step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prediction_type="epsilon" , generator=None , return_dict: bool = True , ):
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
    # 3. Clip "predicted x_0" to the bit scale
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion(DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # editorial assumption: bind the bit-aware step function above as a method on this
        # scheduler instance (__get__ supplies `self`), matching the scheduler type
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler ) if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step.__get__(scheduler )
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height: Optional[int] = 256 , width: Optional[int] = 256 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            model_output = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
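# Usage sketch (illustrative; assumes a UNet trained for bit diffusion):
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]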
| 4 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder' ):
            key = key.replace('module.encoder' , 'glpn.encoder' )
        if key.startswith('module.decoder' ):
            key = key.replace('module.decoder' , 'decoder.stages' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(f'''patch_embed{idx}''' , f'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
            key = key.replace(f'''layer_norm{idx}''' , f'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(f'''block{idx}''' , f'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(f'''linear_c{idx}''' , f'''linear_c.{int(idx )-1}''' )
        if "bot_conv" in key:
            key = key.replace('bot_conv' , '0.convolution' )
        if "skip_conv1" in key:
            key = key.replace('skip_conv1' , '1.convolution' )
        if "skip_conv2" in key:
            key = key.replace('skip_conv2' , '2.convolution' )
        if "fusion1" in key:
            key = key.replace('fusion1' , '1.fusion' )
        if "fusion2" in key:
            key = key.replace('fusion2' , '2.fusion' )
        if "fusion3" in key:
            key = key.replace('fusion3' , '3.fusion' )
        if "fusion" in key and "conv" in key:
            key = key.replace('conv' , 'convolutional_layer' )
        if key.startswith('module.last_layer_depth' ):
            key = key.replace('module.last_layer_depth' , 'head.head' )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(f'''Unknown model name: {model_name}''' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print('Looks ok!' )
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 4 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    """simple docstring"""
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
return config
def create_rename_keys( config ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
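# Layout note (matches the slicing in read_in_q_k_v above): the original Swin
# checkpoint stores the attention input projection as one fused (3 * dim, dim)
# matrix ordered [query; key; value], which is why query takes rows [:dim],
# key rows [dim : dim * 2] and value the last dim rows.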
def correct_unfold_reduction_order(x):
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order(x):
    """simple docstring"""
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order(x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x):
    """simple docstring"""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
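# Hypothetical sanity check (not part of the original script): the reverse_*
# helpers above should be exact inverses of the correct_* ones, since the
# conversion only has to undo the channel permutation that Swin's "unfold"
# patch-merging layout applied. The permutation [0, 2, 1, 3] is an involution,
# so a round trip must reproduce the input exactly.
def _sanity_check_unfold_roundtrip():
    x = torch.randn(8 , 16 )  # (out_channel, in_channel) with in_channel % 4 == 0
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x ) ) , x )
    y = torch.randn(16 )
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(y ) ) , y )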
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """simple docstring"""
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , file_name=model_name )[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
# verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 71 | 1 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
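# Caveat: the class name above is tied to Yahoo's current markup; if it changes,
# soup.find(...) returns None and the .find('''span''') call raises
# AttributeError. A defensive variant (a sketch, not the original behaviour):
#   tag = soup.find('''div''' , class_=class_ )
#   return tag.find('''span''' ).text if tag is not None else "N/A"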
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 275 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions() -> List[str]:
    """simple docstring"""
    url = '''https://pypi.org/pypi/diffusers/json'''
    releases = json.loads(request.urlopen(url ).read() )['''releases'''].keys()
    return sorted(releases , key=lambda x : version.Version(x ) )
def init_hf_modules():
    """simple docstring"""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE , exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike] ):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path , exist_ok=True )
    init_path = dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file: Union[str, os.PathLike] ) -> List[str]:
    """simple docstring"""
    with open(module_file , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
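# Example (hypothetical module text, not from the library): given a file body
# containing "import .utils" and "from .pipeline import Foo", the two regexes
# above collect {"utils", "pipeline"}:
#   sample = "import .utils\nfrom .pipeline import Foo\n"
#   found = re.findall(r'''^\s*import\s+\.(\S+)\s*$''' , sample , flags=re.MULTILINE )
#   found += re.findall(r'''^\s*from\s+\.(\S+)\s+import''' , sample , flags=re.MULTILINE )
#   assert set(found ) == {"utils", "pipeline"}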
def get_relative_import_files(module_file: Union[str, os.PathLike] ) -> List[str]:
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F'''{f}.py''' for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports(filename: Union[str, os.PathLike] ) -> List[str]:
    """simple docstring"""
    with open(filename , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r'''^\s*import\s+(\S+)\s*$''' , content , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'''^\s*from\s+(\S+)\s+import''' , content , flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`''' )
    return get_relative_imports(filename )
def get_class_in_module(class_name: Optional[str] , module_path: Union[str, os.PathLike] ):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep , '''.''' )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module , class_name )
def find_pipeline_class(loaded_module ):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module , inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , DiffusionPipeline )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , ) -> str:
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path , module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = '''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''' )
        elif revision in available_versions:
            revision = F'''v{revision}'''
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {', '.join(available_versions + ['main'] )}.''' )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision , pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=False , )
            submodule = '''git'''
            module_file = pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , )
            submodule = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file , submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F'''{module_needed}.py'''
            shutil.copy(os.path.join(pretrained_model_name_or_path , module_needed ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token , str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path , revision=revision , token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path , F'''{module_needed}.py''' , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return os.path.join(full_submodule , module_file )
def get_class_from_dynamic_module( pretrained_model_name_or_path : Union[str, os.PathLike] , module_file : str , class_name : Optional[str] = None , cache_dir : Optional[Union[str, os.PathLike]] = None , force_download : bool = False , resume_download : bool = False , proxies : Optional[Dict[str, str]] = None , use_auth_token : Optional[Union[bool, str]] = None , revision : Optional[str] = None , local_files_only : bool = False , **kwargs , ):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path , module_file , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    return get_class_in_module(class_name , final_module.replace('''.py''' , '''''' ) )
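# Typical usage (a sketch; the repo and file names are illustrative, not taken
# from this module): fetch a community pipeline class straight from the Hub and
# let find_pipeline_class pick the single DiffusionPipeline subclass it defines:
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline" , "pipeline.py" , class_name=None
#   )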
| 75 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet(self):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        return model
@property
    def dummy_vae(self):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        return model
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0] )
                def to(self, device):
                    self.pixel_values.to(device )
                    return self
            return Out()
        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', image=init_image, )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type='''np''', image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='''np''', )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 272 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir: Path , src_lang: str , tgt_lang: str , model_name: str ):
    """simple docstring"""
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
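    # Note: the doubled braces in the BibTeX block above are f-string escapes;
    # each {{ / }} renders as a literal { / } in the generated README.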
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f'''Generating {path}''' )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 272 | 1 |
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image(tmp_path , image_file):
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        f'''\
        image
        {image_file}
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''' )
    with open(filename , '''w''' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file , malformed_csv_file , caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='''Error tokenizing data''' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image , encoding='''utf-8''' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''image''' ).type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label , encoding='''utf-8''' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
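# Note on `converters`: the mapping is forwarded to pandas.read_csv, so each raw
# cell of the "int_list" column is parsed by the lambda ("1 2 3" -> [1, 2, 3])
# before Arrow infers the list<int64> type asserted above.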
| 106 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
        '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongT5EncoderModel''',
        '''LongT5ForConditionalGeneration''',
        '''LongT5Model''',
        '''LongT5PreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
        '''FlaxLongT5ForConditionalGeneration''',
        '''FlaxLongT5Model''',
        '''FlaxLongT5PreTrainedModel''',
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
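    # With the _LazyModule registered above, importing this package stays cheap:
    # the heavy torch/flax submodules listed in _import_structure are only
    # imported on first attribute access (e.g. the first LongT5Model lookup).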
| 106 | 1 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search: smallest index in v[l..r] whose value is >= key.

    >>> ceil_index([2, 3, 7, 11], -1, 3, 5)
    2
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """O(n log n) LIS length via the classic "tails" array.

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the smallest tail element that is >= v[i]
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 137 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 137 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file, """w""", encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts(self, tokenizer):
        input_text = """adapt react readapt apt"""
        output_text = """adapt react readapt apt"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
        input_text = """adapt react readapt apt"""
        bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
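    # The merges above define a tiny BPE: "adapt" is a whole vocabulary entry,
    # while "react" has no merge path to a single token, so it is split into
    # continuation-marked subwords, e.g.:
    #   tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, unk_token="""<unk>""" )
    #   tokenizer.tokenize("""react""" )  # -> ['re@@', 'a@@', 'c@@', 't']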
| 147 |
from ..utils import DummyObject, requires_backends
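# Dummy placeholder pattern: every stub class below is built on the DummyObject
# metaclass imported above, so instantiating it or calling its classmethods
# (typically from_config / from_pretrained) raises a helpful "requires the
# PyTorch backend" error instead of an opaque ImportError when torch is missing.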
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : int ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Dict , **__lowerCAmelCase : Any ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Dict ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
def lowerCamelCase__ ( *__lowerCAmelCase : int , **__lowerCAmelCase : Any ):
"""simple docstring"""
requires_backends(__lowerCAmelCase , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class _lowerCAmelCase ( metaclass=__a ):
_lowercase =['''torch''']
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def __a ( cls , *_UpperCamelCase , **_UpperCamelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
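# --- Illustrative sketch (not part of the original file) ---
# A minimal re-implementation of the pattern above, assuming `requires_backends`
# simply checks for installed packages: importing the class is free, using it is not.
def _demo_requires_backends(obj, backends):
    import importlib.util

    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {missing}")


class _DemoTorchOnly:
    def __init__(self):
        _demo_requires_backends(self, ["torch"])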
| 231 | 0 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the Worldometers page and return {statistic name: value}."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
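# --- Offline sketch (assumed HTML snippet, for illustration only) ---
# The same parsing logic as world_covid19_stats(), run on a hard-coded page so
# it works without network access.
def _demo_parse() -> dict:
    html = '<h1>Coronavirus Cases:</h1><div class="maincounter-number">1,000</div>'
    soup = BeautifulSoup(html, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    return {k.text.strip(): v.text.strip() for k, v in zip(keys, values)}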
| 357 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
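# --- Illustration (sketch): what the copy-consistency header looks like ---
# A "Copied from" comment names the source object and an optional rename rule:
_m = _re_copy_warning.search(
    "# Copied from diffusers.models.attention.Attention with Attention->CrossAttention"
)
assert _m is not None and _m.groups()[1] == "models.attention.Attention"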
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting to some code, wrapping it in a dummy class when it is indented."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the file content when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)

    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
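    # Typical invocations, from the repository root:
    #   python utils/check_copies.py                      # report inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite stale copies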
| 325 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores for the given predictions and tests."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
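# --- Sketch: the unbiased pass@k estimator in isolation ---
# With n=5 generated samples of which c=2 pass their tests, pass@1 should be 2/5.
_demo = estimate_pass_at_k(num_samples=[5], num_correct=[2], k=1)
assert abs(float(_demo[0]) - 0.4) < 1e-9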
| 71 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
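    # --- Quick sanity checks (sketch) ---
    assert is_sri_lankan_phone_number("0094702343221")
    assert is_sri_lankan_phone_number("+94702343221")
    assert is_sri_lankan_phone_number("0712345678")
    assert not is_sri_lankan_phone_number("1234567890")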
| 71 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_A : Dict =torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_A : Union[str, Any] =torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_A : Any =torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_A : int =torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_A : Tuple =torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_A : List[Any] =torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_A : str =torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_A : Dict =torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_A : List[Any] =torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_A : Optional[int] =torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_A : List[Any] =torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_A : Union[str, Any] =torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_A : Union[str, Any] =torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_A : str =torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_A : Any =torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 129 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
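    # Example run (hypothetical script name and output path):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation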
| 129 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
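# --- Illustration (sketch): how a wildcard MAPPING entry is resolved ---
# A fairseq key such as "encoder.layers.3.fc1" matches the "fc1" entry, and the
# layer index is spliced into the "*" placeholder:
_sample = "encoder.layers.3.fc1"
_layer = _sample.split("fc1")[0].split(".")[-2]
assert "unispeech." + MAPPING["fc1"].replace("*", _layer) == "unispeech.encoder.layers.3.feed_forward.intermediate_dense"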
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43

            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
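    # Example invocation (hypothetical local paths; script name assumed):
    #   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./unispeech.pt --dict_path ./dict.json \
    #       --config_path ./config.json --pytorch_dump_folder_path ./unispeech-hf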
| 108 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda r: version.Version(r))
def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
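# --- Sketch: what the two regexes in get_relative_imports() extract ---
# A module containing "from .utils import helper" and "from .ops import op"
# yields ["utils", "ops"] (set order not guaranteed), e.g.:
#   with open("demo.py", "w") as f:
#       f.write("from .utils import helper\nfrom .ops import op\n")
#   assert sorted(get_relative_imports("demo.py")) == ["ops", "utils"]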
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """
    Import a module from the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. There must be exactly one such class.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """
    Download a module file from a local folder, a community pipeline on GitHub, or a repo on the Hugging Face Hub,
    cache it inside the dynamic modules cache, and return the path to the cached file.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
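# --- Usage sketch (hypothetical community pipeline repo) ---
# pipeline_cls = get_class_from_dynamic_module(
#     "my-user/my-custom-pipeline",  # hypothetical Hub repo
#     "pipeline.py",
#     class_name=None,  # auto-detect the single DiffusionPipeline subclass
# )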
| 271 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
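    # Example invocation (hypothetical paths):
    #   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin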
| 94 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
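    # Lazy access sketch: importing the package stays cheap; the line below
    # would trigger the heavy torch-backed import only at this point:
    #   from transformers.models.conditional_detr import ConditionalDetrModel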
| 94 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """
    Enumerate candidate reduced fractions for exponents n in {1, 2, -1, -2},
    collect the unique reduced three-fraction sums, and return the
    numerator + denominator of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 137 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
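# Example session (illustrative output; values will differ per machine):
#   $ accelerate env
#   Copy-and-paste the text below in your GitHub issue
#   - `Accelerate` version: ...
#   - Platform: ...
#   - PyTorch version (GPU?): ...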
| 137 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embeddings
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , name)
        # ffn -> feed_forward
        name = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , name)
        # time_mix_k -> time_mix_key
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 5_0277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}." )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
        # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
        print(
            '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
        shard_files = list(shards.keys() )
        del state_dict
        del shards
        gc.collect()
        for shard_file in shard_files:
            state_dict = torch.load(os.path.join(output_dir , shard_file ) )
            torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
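# Sanity sketch (synthetic tensors, not a real checkpoint): `convert_state_dict`
# maps RWKV-style keys onto the Hugging Face layout; `head.weight` alone keeps
# its name while everything else gains the `rwkv.` prefix.
def _demo_convert_state_dict():
    fake = {
        "emb.weight": torch.zeros(4, 2),
        "blocks.0.att.time_mix_k": torch.zeros(2),
        "head.weight": torch.zeros(4, 2),
    }
    # expected keys: head.weight, rwkv.blocks.0.attention.time_mix_key,
    # rwkv.embeddings.weight
    return sorted(convert_state_dict(fake))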
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 247 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
        '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
        '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
    }
    dataset = Dataset.from_dict(data_dict )
    return dataset
class lowercase__ ( TestCase ):
'''simple docstring'''
    def test_make_duplicate_clusters( self ):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )
    def test_deduplicate_dataset( self ):
        """simple docstring"""
        ds = get_dataset()
        ds_dedup , duplicate_clusters = deduplicate_dataset(ds )
        self.assertEqual(len(ds_dedup ) , 2 )
        print(duplicate_clusters )
        self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , True )
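# Background sketch (illustrative only; the real logic lives in
# minhash_deduplication): near-duplicate clustering boils down to Jaccard
# similarity over token sets, which MinHash approximates cheaply.
def _demo_jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

assert _demo_jaccard("a " * 20, "a " * 30) == 1.0  # why the first two rows cluster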
| 247 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence( __a ):
    __a = re.sub('<n>' , '' , __a )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) )
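# Usage sketch (assumes the punkt model downloaded above is present): the
# function strips the pegasus "<n>" marker and rejoins sentence-tokenized text
# with newlines, the format ROUGE-Lsum scoring expects.
def _demo_split():
    return add_newline_to_end_of_each_sentence("First sentence. Second one!")
    # -> "First sentence.\nSecond one!"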
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class A__ ( PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__( self , backbone_config : Optional[Dict] = None , feature_size : int = 2_56 , mask_feature_size : int = 2_56 , hidden_dim : int = 2_56 , encoder_feedforward_dim : int = 10_24 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 20_48 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 2_55 , num_queries : int = 1_00 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 1_25_44 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs , ) -> int:
        """simple docstring"""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {",".join(self.backbones_supported )}""" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config( cls , backbone_config : PretrainedConfig , **kwargs ) -> Dict:
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
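# Usage sketch (defaults from above; requires the transformers backbone
# registry at runtime): the config builds a default Swin backbone when none is
# given, and `to_dict`/`from_dict` round-trip it, with `from_dict` coming from
# the `PretrainedConfig` base class.
def _demo_config_roundtrip():
    config = A__(num_queries=1_00 )
    restored = A__.from_dict(config.to_dict() )
    return restored.num_queries  # -> 100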
| 325 | 0 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 204 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid : Matrix , row : int , column : int , n : int ) -> bool:
    """simple docstring"""
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid : Matrix ) -> tuple[int, int] | None:
    """simple docstring"""
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid : Matrix ) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid : Matrix ) -> None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell , end=""" """ )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
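# Validation sketch (extra helper, not part of the original module): a solved
# grid must use each digit exactly once per row and per column; `zip(*grid)`
# iterates columns, so this spot-checks `sudoku`'s output.
def _demo_is_valid(grid: Matrix) -> bool:
    rows_ok = all(sorted(row) == list(range(1, 10)) for row in grid)
    cols_ok = all(sorted(col) == list(range(1, 10)) for col in zip(*grid))
    return rows_ok and cols_ok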
| 204 | 1 |
def count_divisors(n):
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    '''simple docstring'''
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
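# Worked example: 28 = 1 + 2 + ... + 7 is the first triangle number with more
# than five divisors (1, 2, 4, 7, 14, 28); 28 = 2^2 * 7 gives
# (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6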
| 129 |
def solution():
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = ''''''.join(constant)
return (
int(constant[0])
* int(constant[9])
* int(constant[99])
* int(constant[999])
* int(constant[9999])
* int(constant[99999])
* int(constant[999999])
)
if __name__ == "__main__":
print(solution())
| 129 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
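# Usage sketch (synthetic data; torch is assumed available alongside the
# tensors this module converts): a batch of diffusion outputs in [-1, 1] with
# shape (batch, channels, height, width) becomes a list of PIL images.
def _demo_pt_to_pil():
    import torch
    batch = torch.rand(2, 3, 8, 8) * 2 - 1  # two tiny RGB images in [-1, 1]
    return pt_to_pil(batch)  # -> [PIL.Image.Image, PIL.Image.Image]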
| 256 |
def depth_first_search( grid , row , col , visit ):
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
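# Worked example: in a 2x2 grid of open cells there are exactly two simple
# paths from (0, 0) to (1, 1) -- right-then-down and down-then-right.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2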
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart'''] = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mbart_fast'''] = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mbart'''] = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mbart'''] = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mbart'''] = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 94 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _snake_case ( AbstractFileSystem ):
    root_marker = ''
    protocol = 'hf-legacy'  # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail = False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 94 | 1 |
"""simple docstring"""
def __snake_case ( sorted_collection , item ):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
interpolation_search = __snake_case
def interpolation_search_by_recursion( sorted_collection , item , left , right ):
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ):
    '''simple docstring'''
    if collection != sorted(collection ):
        raise ValueError("Collection must be ascending sorted" )
    return True
if __name__ == "__main__":
import sys
    debug = 0
if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(F"{target} found at positions: {result}")
else:
print("Not found")
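# Worked example (beyond the demo above): for item 5 in [1, 3, 5, 7] the probe
# index is left + (item - coll[left]) * (right - left) //
# (coll[right] - coll[left]) = 0 + (5 - 1) * 3 // (7 - 1) = 2, so the search
# succeeds in a single step on uniformly spaced data.
def _demo_probe():
    return interpolation_search([1, 3, 5, 7], 5)  # -> 2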
| 357 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 202 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load
    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
        ]
    ]
    load(
        "MultiScaleDeformableAttention" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ] , )
    import MultiScaleDeformableAttention as MSDA
    return MSDA
| 247 |
"""simple docstring"""
def pancake_sort(arr):
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
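# Worked example: each pass flips the current maximum to the front and then
# into its final slot, so [3, 1, 2] sorts in two shrinking passes.
assert pancake_sort([3, 1, 2]) == [1, 2, 3]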
| 247 | 1 |
"""simple docstring"""
class _lowercase : # Public class to implement a graph
"""simple docstring"""
    def __init__( self , row : int , col : int , graph : list[list[bool]] ) -> None:
        '''simple docstring'''
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i : int , j : int , visited : list[list[bool]] ) -> bool:
        '''simple docstring'''
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i : int , j : int , visited : list[list[bool]] ) -> None:
        '''simple docstring'''
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        '''simple docstring'''
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
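# Usage sketch (the obfuscated class name `_lowercase` is kept as-is): two
# diagonally touching 1-cells form a single island because `diffs` walks all
# eight neighbor directions.
def _demo_count_islands():
    grid = [[1, 0, 0], [0, 1, 0], [0, 0, 0]]
    return _lowercase(3 , 3 , grid ).count_islands()  # -> 1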
| 366 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase ={'''unk_token''': '''<unk>'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] , **UpperCamelCase__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase ='''lower newer'''
__UpperCamelCase ='''lower newer'''
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='''lower newer'''
__UpperCamelCase =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCamelCase =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase ='''Encode this sequence.'''
__UpperCamelCase =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
__UpperCamelCase ='''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
__UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCamelCase ='''Encode <mask> sequence'''
__UpperCamelCase ='''Encode <mask>sequence'''
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase ='''A, <mask> AllenNLP sentence.'''
__UpperCamelCase =tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
__UpperCamelCase =tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase =f"""{text_of_1_token} {text_of_1_token}"""
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
| 85 | 0 |
from string import ascii_uppercase
lowerCamelCase : Any = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any( num : int , base : int ):
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError('int() can\'t convert non-string with explicit base' )
    if num < 0:
        raise ValueError('parameter must be positive int' )
    if isinstance(base , str ):
        raise TypeError('\'str\' object cannot be interpreted as an integer' )
    if isinstance(base , float ):
        raise TypeError('\'float\' object cannot be interpreted as an integer' )
    if base in (0, 1):
        raise ValueError('base must be >= 2' )
    if base > 36:
        raise ValueError('base must be <= 36' )
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div , mod = divmod(num , base )
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(num )
            return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 204 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase : List[Any] = None
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase : Optional[int] = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
lowerCamelCase : List[Any] = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
lowerCamelCase : Any = "▁"
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''token_type_ids''']
UpperCamelCase = FNetTokenizer
def __init__( self : Optional[int] , A_ : Any=None , A_ : int=None , A_ : int=False , A_ : Optional[int]=True , A_ : List[Any]=True , A_ : Tuple="<unk>" , A_ : Optional[int]="[SEP]" , A_ : List[Any]="<pad>" , A_ : Optional[int]="[CLS]" , A_ : Optional[Any]="[MASK]" , **A_ : Dict , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = (
AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ )
if isinstance(A_ , A_ )
else mask_token
)
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 204 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_cpmant"""] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 |
'''simple docstring'''
def match_pattern(input_string : str , pattern : str ):
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = """aab"""
    pattern = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 5 | 1 |
"""simple docstring"""
def solution( length : int = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 256 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max : bool = True , max_x : float = math.inf , min_x : float = -math.inf , max_y : float = math.inf , min_y : float = -math.inf , visualization : bool = False , start_temperate : float = 100 , rate_of_decrease : float = 0.01 , threshold_temp : float = 1 , ) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa(x , y ):
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
    def test_fa(x , y ):
        return (3 * x**2) - (6 * y)
UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
UpperCAmelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F'''{local_min.score()}'''
)
| 256 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token
                    for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 268 |
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an (ordered) token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        # greedy longest-match-first: repeatedly take the longest prefix of the
        # remaining characters that is present in the vocabulary
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
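
# Example of the greedy longest-match-first scan above: with vocab {"ab": 0, "c": 1},
# tokenize("abc") returns ["ab", "c"], while a character not covered by any
# vocabulary entry collapses to the unk token.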
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba first, then run WordPiece on each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids to a string, dropping negative ids and special tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
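
# Minimal usage sketch (an assumption, not part of the original module; it needs
# the `jieba` backend installed and network access to the checkpoint listed in
# PRETRAINED_VOCAB_FILES_MAP above):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer.encode("今天天气真好!")
#     print(tokenizer.decode(ids))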
| 268 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # sift the node at `start` down to restore the min-heap property
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # sift the value at `index` up towards the root
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prims_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
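
# Input format: `adjacency_list` maps each vertex to a list of [neighbor, weight]
# pairs, e.g. {0: [[1, 7], [2, 9]], 1: [[0, 7]], 2: [[0, 9]]} for a 3-vertex graph.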
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
| 69 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword expected by the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the file contents of the artifacts downloaded by `get_last_daily_ci_artifacts`."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
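

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original script): a
    # GitHub token is read from the GH_TOKEN environment variable, and the
    # artifact name below is illustrative only.
    token = os.environ.get("GH_TOKEN")
    reports = get_last_daily_ci_reports(["ci_results"], output_dir=".", token=token)
    print(sorted(reports))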
| 202 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
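
# Minimal usage sketch (an assumption, not part of the original module; needs
# the `torch` and `vision` backends plus network access to the checkpoint):
#
#     from PIL import Image
#     captioner = ImageCaptioningTool()
#     print(captioner(Image.open("photo.jpg")))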
| 60 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    """Create train/eval `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run evaluation on `eval_dataloader` and return the accuracy."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
if __name__ == "__main__":
main()
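
# Launch sketch (an assumption about the intended invocation, not part of the
# original script): run through `accelerate launch` with DeepSpeed enabled, e.g.
#
#     accelerate launch --use_deepspeed --zero_stage 2 checkpointing.py \
#         --num_epochs 2 --output_dir ./checkpoints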
| 60 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 99 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 85 | 0 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 366 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
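
# e.g. heaps([1, 2, 3]) yields all 3! = 6 orderings, starting with (1, 2, 3).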
if __name__ == "__main__":
lowercase : List[str] = input('Enter numbers separated by a comma:\n').strip()
lowercase : int = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 311 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
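
# Minimal usage sketch (an assumption, not part of the original module; the
# defaults above correspond to a base-sized architecture):
#
#     config = WavLMConfig(num_hidden_layers=6)  # override any field by keyword
#     print(config.model_type, config.inputs_to_logits_ratio)  # "wavlm", 320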
| 5 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 371 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    # heap helper: index of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: index of the left child of the node at `position`
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: index of the right child of the node at `position`
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
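

if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original module): a
    # triangle graph whose spanning tree drops the heaviest edge.
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 10)
    dist, parent = prims_algo(graph)
    print(parent)  # {1: None, 2: 1, 3: 2}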
| 104 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : int = DeiTForMaskedImageModeling(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : str = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : List[str] = DeiTForMaskedImageModeling(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.type_sequence_label_size
UpperCAmelCase_ : Dict = DeiTForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : str = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Dict = DeiTForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (__A , __A , unittest.TestCase ):
__magic_name__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = DeiTModelTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase_ )
model.train()
UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
UpperCAmelCase_ : int = model(**lowerCAmelCase_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : str = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase_ ),
*get_values(lowerCAmelCase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
UpperCAmelCase_ : List[Any] = problem_type["title"]
UpperCAmelCase_ : List[str] = problem_type["num_labels"]
UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : Union[str, Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ : Optional[int] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase_ ) as warning_list:
UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[int] = DeiTModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case ( ):
UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
UpperCAmelCase_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
UpperCAmelCase_ : Optional[int] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : int = image_processor(images=lowerCAmelCase_ , return_tensors="pt" )
UpperCAmelCase_ : Optional[int] = inputs.pixel_values.to(lowerCAmelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : int = model(lowerCAmelCase_ )
| 268 |
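# A minimal, self-contained sketch of the sequence-length arithmetic used by the
# DeiT tester above: the token sequence is the flattened patch grid plus the
# [CLS] and distillation tokens. The names below are illustrative, not from the sample.
def deit_seq_length(image_size: int, patch_size: int) -> int:
    # non-overlapping square patches tile the image
    num_patches = (image_size // patch_size) ** 2
    # +2 accounts for the [CLS] token and the distillation token
    return num_patches + 2

assert deit_seq_length(image_size=30, patch_size=2) == 227  # 15 * 15 + 2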
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ = get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = '''dummy_data'''
__magic_name__ = '''datasets'''
__magic_name__ = False
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = dataset_name
UpperCAmelCase_ : Optional[int] = cache_dir
UpperCAmelCase_ : Tuple = use_local_dummy_data
UpperCAmelCase_ : int = config
# download_callbacks take a single url as input
UpperCAmelCase_ : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCAmelCase_ : Dict = str(lowerCAmelCase_ )
# to be downloaded
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : int = None
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if self._dummy_file is None:
UpperCAmelCase_ : List[str] = self.download_dummy_data()
return self._dummy_file
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
UpperCAmelCase_ : int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCAmelCase_ : Union[str, Any] = cached_path(
lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ )
return os.path.join(lowerCAmelCase_ , self.dummy_file_name )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
if self._bucket_url is None:
UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
# return the full path if it's a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCAmelCase_ : Dict = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCAmelCase_ : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , (list, tuple) ):
return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any:
return self.download_and_extract(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return path
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
return {}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
for single_url in single_urls:
download_callback(lowerCAmelCase_ )
else:
UpperCAmelCase_ : Tuple = single_urls
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls]
else:
UpperCAmelCase_ : Optional[int] = single_urls
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) )
UpperCAmelCase_ : int = value
# make sure that values are unique
if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
UpperCAmelCase_ : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url )
UpperCAmelCase_ : Union[str, Any] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(lowerCAmelCase_ )
return dummy_data_list
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]:
for download_callback in self.download_callbacks:
download_callback(lowerCAmelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
def _iter_archive_members(lowerCAmelCase_ : Dict ):
# this preserves the order of the members inside the ZIP archive
UpperCAmelCase_ : str = Path(self.dummy_file ).parent
UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCAmelCase_ : str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : str = [paths]
for path in paths:
if os.path.isfile(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ):
if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(lowerCAmelCase_ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
| 268 | 1 |
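# A small sketch of how the dummy-data manager above derives local file names:
# the last path component of each URL is percent-encoded with quote_plus so that
# query strings cannot produce invalid file names. Purely illustrative helper name.
import os
import urllib.parse
from pathlib import Path

def dummy_local_name(base_dir: str, url: str) -> str:
    return os.path.join(base_dir, urllib.parse.quote_plus(Path(url).name))

assert (
    dummy_local_name("dummy", "https://host/data/train.csv?rev=2")
    == os.path.join("dummy", "train.csv%3Frev%3D2")
)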
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class __snake_case (_a ):
def __init__( self : str , *_UpperCAmelCase : Any , **_UpperCAmelCase : List[str] ) -> None:
'''simple docstring'''
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 159 |
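# The Perceiver class above is a thin deprecation shim: warn once at construction
# time, then defer everything to the replacement class. A generic sketch of that
# pattern (the class names here are hypothetical, not from the library):
import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)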
from collections import namedtuple
import requests
from lxml import html # type: ignore
_lowerCamelCase : Dict = namedtuple("covid_data", "cases deaths recovered")
def _UpperCAmelCase (UpperCamelCase_ : str = "https://www.worldometers.info/coronavirus/" ):
'''simple docstring'''
_lowerCAmelCase : Dict = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(UpperCamelCase_ ).content ).xpath(UpperCamelCase_ ) )
_lowerCamelCase : Tuple = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 159 | 1 |
"""simple docstring"""
from collections.abc import Callable
def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ):
lowerCAmelCase : float = a
lowerCAmelCase : float = b
if function(_snake_case ) == 0: # a or b is already a root of the function
return a
elif function(_snake_case ) == 0:
return b
elif (
function(_snake_case ) * function(_snake_case ) > 0
): # if neither is a root and f(a) and f(b) have the same sign,
# then bisection cannot find a root in this interval
raise ValueError('''could not find root in given interval.''' )
else:
lowerCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(_snake_case ) == 0:
return mid
elif function(_snake_case ) * function(_snake_case ) < 0:
lowerCAmelCase : Dict = mid
else:
lowerCAmelCase : List[str] = mid
lowerCAmelCase : List[Any] = start + (end - start) / 2.0
return mid
def _snake_case ( _snake_case : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 60 |
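# A worked example of the bisection idea implemented above, written as a
# self-contained sketch: solve x**2 - 2 = 0 on [1, 2], i.e. approximate sqrt(2).
# The stopping tolerance mirrors the 1e-7 interval width used in the sample.
def bisect(f, lo: float, hi: float, tol: float = 1e-7) -> float:
    assert f(lo) * f(hi) <= 0, "f must change sign on [lo, hi]"
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if f(lo) * f(mid) <= 0:
            hi = mid  # the root lies in [lo, mid]
        else:
            lo = mid  # the root lies in [mid, hi]
    return (lo + hi) / 2.0

root = bisect(lambda x: x * x - 2, 1.0, 2.0)
assert abs(root - 2 ** 0.5) < 1e-6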
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU.
# This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
# but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
snake_case__ : List[Any] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class snake_case_:
__UpperCamelCase = PegasusConfig
__UpperCamelCase = {}
__UpperCamelCase = '''gelu'''
def __init__( self : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Optional[Any]=9_9 , UpperCamelCase_ : Any=3_2 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : str=3_7 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Any=2_0 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : Any=0 , ):
lowerCAmelCase : List[Any] = parent
lowerCAmelCase : Optional[int] = batch_size
lowerCAmelCase : Any = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Union[str, Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Any = num_hidden_layers
lowerCAmelCase : List[str] = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : List[Any] = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : str = eos_token_id
lowerCAmelCase : List[Any] = pad_token_id
lowerCAmelCase : List[str] = bos_token_id
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase : List[str] = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase : Dict = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Any = 2_0
lowerCAmelCase : Any = model_class_name(UpperCamelCase_ )
lowerCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : Optional[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict ):
lowerCAmelCase : Dict = 2_0
lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ )
lowerCAmelCase : Any = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase, lowerCAmelCase : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
lowerCAmelCase : List[Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
lowerCAmelCase : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _snake_case ( _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[Any]=None , _snake_case : Dict=None , ):
if attention_mask is None:
lowerCAmelCase : Tuple = np.not_equal(_snake_case , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowerCAmelCase : Dict = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class snake_case_( a__ , unittest.TestCase ):
__UpperCamelCase = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCamelCase = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = FlaxPegasusModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Tuple ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Tuple = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Dict = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase : Any = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase__ ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : int = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : List[Any] = np.ones((1, 1) )
lowerCAmelCase : str = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Any = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : List[Any] = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase : int = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase : str = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
lowerCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
| 60 | 1 |
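# The cached-decoding tests above build decoder position ids by broadcasting an
# arange over the batch dimension. A tiny NumPy sketch of that one step (the
# jnp.broadcast_to call in the sample behaves the same way):
import numpy as np

decoder_input_ids = np.ones((2, 5), dtype=np.int32)  # (batch, seq_len)
batch_size, seq_len = decoder_input_ids.shape
position_ids = np.broadcast_to(
    np.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)
assert position_ids.shape == (2, 4)
assert (position_ids[0] == np.arange(4)).all()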
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class UpperCamelCase_ :
def __init__( self : Optional[Any] ) -> None:
UpperCAmelCase_ : list[Any] = []
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 0
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> bool:
return self.head == self.tail
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Any ) -> None:
self.data.append(lowerCAmelCase_ )
UpperCAmelCase_ : int = self.tail + 1
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
UpperCAmelCase_ : List[str] = self.data[self.head]
UpperCAmelCase_ : Optional[int] = self.head + 1
return ret
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.tail - self.head
def _SCREAMING_SNAKE_CASE ( self : Any ) -> None:
print(self.data )
print("**************" )
print(self.data[self.head : self.tail] )
class UpperCamelCase_ :
def __init__( self : int , lowerCAmelCase_ : Any ) -> None:
UpperCAmelCase_ : Dict = data
UpperCAmelCase_ : MyNode | None = None
UpperCAmelCase_ : MyNode | None = None
UpperCAmelCase_ : int = 1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return self.data
def _SCREAMING_SNAKE_CASE ( self : Any ) -> MyNode | None:
return self.left
def _SCREAMING_SNAKE_CASE ( self : Any ) -> MyNode | None:
return self.right
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return self.height
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any ) -> None:
UpperCAmelCase_ : str = data
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : MyNode | None ) -> None:
UpperCAmelCase_ : List[Any] = node
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : MyNode | None ) -> None:
UpperCAmelCase_ : Dict = node
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int ) -> None:
UpperCAmelCase_ : str = height
def snake_case ( A__ ):
if node is None:
return 0
return node.get_height()
def snake_case ( A__ ,A__ ):
if a > b:
return a
return b
def snake_case ( A__ ):
print("left rotation node:" ,node.get_data() )
UpperCAmelCase_ : Tuple = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(A__ )
UpperCAmelCase_ : List[Any] = my_max(get_height(node.get_right() ) ,get_height(node.get_left() ) ) + 1
node.set_height(A__ )
UpperCAmelCase_ : List[str] = my_max(get_height(ret.get_right() ) ,get_height(ret.get_left() ) ) + 1
ret.set_height(A__ )
return ret
def snake_case ( A__ ):
print("right rotation node:" ,node.get_data() )
UpperCAmelCase_ : str = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(A__ )
UpperCAmelCase_ : Any = my_max(get_height(node.get_right() ) ,get_height(node.get_left() ) ) + 1
node.set_height(A__ )
UpperCAmelCase_ : List[Any] = my_max(get_height(ret.get_right() ) ,get_height(ret.get_left() ) ) + 1
ret.set_height(A__ )
return ret
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = node.get_left()
assert left_child is not None
node.set_left(left_rotation(A__ ) )
return right_rotation(A__ )
def snake_case ( A__ ):
UpperCAmelCase_ : Optional[int] = node.get_right()
assert right_child is not None
node.set_right(right_rotation(A__ ) )
return left_rotation(A__ )
def snake_case ( A__ ,A__ ):
if node is None:
return MyNode(A__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() ,A__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
UpperCAmelCase_ : Optional[Any] = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
UpperCAmelCase_ : str = right_rotation(A__ )
else:
UpperCAmelCase_ : List[Any] = lr_rotation(A__ )
else:
node.set_right(insert_node(node.get_right() ,A__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
UpperCAmelCase_ : Any = node.get_right()
assert right_child is not None
if data < right_child.get_data():
UpperCAmelCase_ : List[Any] = rl_rotation(A__ )
else:
UpperCAmelCase_ : int = left_rotation(A__ )
UpperCAmelCase_ : int = my_max(get_height(node.get_right() ) ,get_height(node.get_left() ) ) + 1
node.set_height(A__ )
return node
def snake_case ( A__ ):
while True:
UpperCAmelCase_ : Union[str, Any] = root.get_right()
if right_child is None:
break
UpperCAmelCase_ : int = right_child
return root.get_data()
def snake_case ( A__ ):
while True:
UpperCAmelCase_ : Dict = root.get_left()
if left_child is None:
break
UpperCAmelCase_ : Tuple = left_child
return root.get_data()
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : int = root.get_left()
UpperCAmelCase_ : Dict = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
UpperCAmelCase_ : Dict = get_left_most(A__ )
root.set_data(A__ )
root.set_right(del_node(A__ ,A__ ) )
elif left_child is not None:
UpperCAmelCase_ : Optional[int] = left_child
elif right_child is not None:
UpperCAmelCase_ : List[Any] = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("No such data" )
return root
else:
root.set_left(del_node(A__ ,A__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(A__ ,A__ ) )
if get_height(A__ ) - get_height(A__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
UpperCAmelCase_ : Any = left_rotation(A__ )
else:
UpperCAmelCase_ : Any = rl_rotation(A__ )
elif get_height(A__ ) - get_height(A__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
UpperCAmelCase_ : Tuple = right_rotation(A__ )
else:
UpperCAmelCase_ : str = lr_rotation(A__ )
UpperCAmelCase_ : Optional[Any] = my_max(get_height(root.get_right() ) ,get_height(root.get_left() ) ) + 1
root.set_height(A__ )
return root
class UpperCamelCase_ :
def __init__( self : Dict ) -> None:
UpperCAmelCase_ : MyNode | None = None
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return get_height(self.root )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Any ) -> None:
print("insert:" + str(lowerCAmelCase_ ) )
UpperCAmelCase_ : List[str] = insert_node(self.root , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Any ) -> None:
print("delete:" + str(lowerCAmelCase_ ) )
if self.root is None:
print("Tree is empty!" )
return
UpperCAmelCase_ : int = del_node(self.root , lowerCAmelCase_ )
def __str__( self : int , ) -> str: # a level-order traversal gives a more intuitive look at the tree
UpperCAmelCase_ : Dict = ""
UpperCAmelCase_ : List[str] = MyQueue()
q.push(self.root )
UpperCAmelCase_ : List[Any] = self.get_height()
if layer == 0:
return output
UpperCAmelCase_ : Union[str, Any] = 0
while not q.is_empty():
UpperCAmelCase_ : int = q.pop()
UpperCAmelCase_ : Any = " " * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase_ )
q.push(lowerCAmelCase_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
UpperCAmelCase_ : List[Any] = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase_ ) - 1:
UpperCAmelCase_ : List[str] = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def snake_case ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
lowerCamelCase_ = AVLtree()
lowerCamelCase_ = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 253 |
"""simple docstring"""
import numpy as np
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : Tuple = int(np.ceil((x_end - xa) / h ) )
UpperCAmelCase_ : Optional[Any] = np.zeros((n + 1,) )
UpperCAmelCase_ : List[Any] = ya
UpperCAmelCase_ : Optional[int] = xa
for k in range(A__ ):
UpperCAmelCase_ : List[str] = f(A__ ,y[k] )
UpperCAmelCase_ : Any = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCAmelCase_ : Union[str, Any] = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
UpperCAmelCase_ : Dict = f(x + h ,y[k] + h * ka )
UpperCAmelCase_ : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 | 1 |
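# A worked check of the classic RK4 update implemented above: integrate
# dy/dx = y from x = 0 with y(0) = 1 and compare y(1) with e. Self-contained
# sketch using the same Butcher coefficients, (1/6)(k1 + 2*k2 + 2*k3 + k4).
import math

def rk4_step(f, x: float, y: float, h: float) -> float:
    k1 = f(x, y)
    k2 = f(x + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(x + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(x + h, y + h * k3)
    return y + (h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)

x, y, h = 0.0, 1.0, 0.01
while x < 1.0 - 1e-12:
    y = rk4_step(lambda t, u: u, x, y, h)
    x += h
assert abs(y - math.e) < 1e-8  # fourth-order accuracy makes this very tight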
from collections.abc import Callable
import numpy as np
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = int(np.ceil((x_end - xa) / step_size ) )
__a = np.zeros((n + 1,) )
__a = ya
__a = xa
for k in range(_UpperCAmelCase ):
__a = y[k] + step_size * ode_func(_UpperCAmelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : Optional[Any] = logging.get_logger(__name__)
a : Tuple = "T5Config"
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Any = jnp.zeros_like(__magic_name__ )
UpperCAmelCase : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
UpperCAmelCase : str = shifted_input_ids.at[:, 0].set(__magic_name__ )
UpperCAmelCase : Any = jnp.where(shifted_input_ids == -100 , __magic_name__ , __magic_name__ )
return shifted_input_ids
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : Dict = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = "mt5"
SCREAMING_SNAKE_CASE__ : str = MTaConfig
| 311 | 0 |
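# The mT5 helper above prepares decoder inputs by shifting labels one position
# to the right, placing the decoder start token at position 0 and replacing the
# -100 loss-masking value with the pad token. A NumPy sketch of the same transformation:
import numpy as np

def shift_tokens_right(input_ids, pad_token_id: int, decoder_start_token_id: int):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

labels = np.array([[5, 6, 7, -100]])
assert (shift_tokens_right(labels, 0, 2) == np.array([[2, 5, 6, 7]])).all()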
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __a ( A__ ):
_lowerCAmelCase : UNetaDModel
_lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Dict , SCREAMING_SNAKE_CASE : UNetaDModel , SCREAMING_SNAKE_CASE : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self : Tuple , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 20_00 , SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Tuple , ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.unet.config.sample_size
UpperCamelCase__ : List[str] = (batch_size, 3, img_size, img_size)
UpperCamelCase__ : int = self.unet
UpperCamelCase__ : int = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ) * self.scheduler.init_noise_sigma
UpperCamelCase__ : List[Any] = sample.to(self.device )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
self.scheduler.set_sigmas(SCREAMING_SNAKE_CASE )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
UpperCamelCase__ : Dict = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
UpperCamelCase__ : str = self.scheduler.step_correct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
# prediction step
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
UpperCamelCase__ : str = self.scheduler.step_pred(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ : Dict = output.prev_sample, output.prev_sample_mean
UpperCamelCase__ : Union[str, Any] = sample_mean.clamp(0 , 1 )
UpperCamelCase__ : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
| 196 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : Optional[Any] =False
class __a ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : int = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : Dict = torch.manual_seed(0 )
UpperCamelCase__ : str = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = generator.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = "cyberpunk 2077"
UpperCamelCase__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : Any = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : List[str] = "A painting of a squirrel eating a burger "
UpperCamelCase__ : List[Any] = torch.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Dict = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Any = pipe.image_variation(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="numpy" ).images
UpperCamelCase__ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : int = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 196 | 1 |
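# The SDE-VE pipeline above alternates corrector (Langevin) and predictor steps.
# As a toy illustration of the corrector idea only: Langevin dynamics with the
# known score of a standard normal, score(x) = -x, drives samples toward N(0, 1).
# This is a schematic, not the diffusers scheduler.
import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(-5.0, 5.0, size=10_000)  # arbitrary starting samples
step = 0.05
for _ in range(2_000):
    noise = rng.standard_normal(x.shape)
    x = x + step * (-x) + np.sqrt(2.0 * step) * noise  # x += eps * score + sqrt(2 eps) z

assert abs(x.mean()) < 0.1
assert abs(x.std() - 1.0) < 0.1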
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
_lowercase : List[str] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_lowercase : Union[str, Any] = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase__ ( ):
__UpperCAmelCase = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
__UpperCAmelCase = bs[:]
__UpperCAmelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = set()
__UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase = char
return pairs
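# The get_pairs helper above collects adjacent symbol pairs for BPE merging.
# A compact equivalent using zip, shown as a quick self-contained illustration:
word = ("h", "e", "l", "l", "o")
pairs = set(zip(word, word[1:]))
assert pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}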
class _UpperCAmelCase ( lowerCamelCase__ ):
a__ : int = VOCAB_FILES_NAMES
a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Union[str, Any] = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 332 |
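As a quick sanity check on the two module-level helpers in the tokenizer file above, this sketch shows that bytes_to_unicode produces a reversible 256-entry map and what get_pairs feeds into the merge loop (the word is arbitrary):

byte_encoder = bytes_to_unicode()
assert len(byte_encoder) == 256  # every possible byte gets a printable unicode character
byte_decoder = {v: k for k, v in byte_encoder.items()}
assert byte_decoder[byte_encoder[65]] == 65  # the mapping round-trips

word = tuple("hello")
print(get_pairs(word))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} (set order varies)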
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError):
http_head('''https://huggingface.co''' )
| 104 | 0 |
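The offline fixture used above works by monkeypatching the transport layer. A rough sketch of the same idea with unittest.mock (the patched target and exception type are assumptions, not the fixture's actual implementation):

import requests
from unittest.mock import patch

def _always_offline(*args, **kwargs):
    raise requests.exceptions.ConnectionError("simulated offline mode")

# Every HTTP call under this context fails immediately, so offline behaviour
# can be tested without touching the real network.
with patch("requests.Session.request", _always_offline):
    try:
        requests.get("https://huggingface.co")
    except requests.exceptions.ConnectionError as err:
        print("caught:", err)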
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Union[str, Any] = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Dict = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Any = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Optional[Any] = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Dict = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Optional[Any] = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Dict = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Dict = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Dict = ['''flax''']
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def _lowercase ( cls , *_lowercase , **_lowercase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
| 229 |
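All of the repetitive classes above follow one dummy-object pattern: a metaclass that turns any public attribute access into an import error. A paraphrased sketch of the mechanism (the real DummyObject and requires_backends live in transformers.utils and differ in detail; the class name here is a hypothetical placeholder):

class DummyObject(type):
    """Metaclass that makes any public attribute access fail with an install hint."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")


class FlaxSomething(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["flax"]


try:
    FlaxSomething.from_pretrained("some-checkpoint")  # any public access raises
except ImportError as err:
    print(err)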
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(self, vocab_size=30_522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 229 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    nested_input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested_input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 159 |
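For readers who do not want to dig into datasets.utils.py_utils, this is a stripped-down sketch of what map_nested does in the sequential case (no multiprocessing, no numpy special-casing):

def map_nested_simple(function, data_struct):
    # Recurse through dicts and lists, applying `function` at the leaves.
    if isinstance(data_struct, dict):
        return {k: map_nested_simple(function, v) for k, v in data_struct.items()}
    if isinstance(data_struct, list):
        return [map_nested_simple(function, v) for v in data_struct]
    return function(data_struct)


assert map_nested_simple(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}) == {"a": [2, 3], "b": {"c": 4}}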
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 | 1 |
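_LazyModule defers the heavy torch imports until an attribute is actually used. A simplified sketch of the mechanism with importlib (the real class handles more bookkeeping):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes by importing their submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Reverse map: attribute name -> submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)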
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 119 |
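The tests above lean on re-seeding to make sampling deterministic; the core idea in isolation:

import torch

generator = torch.manual_seed(0)
a = torch.randn(2, 2, generator=generator)

generator = torch.manual_seed(0)  # re-seeding replays the exact same draws
b = torch.randn(2, 2, generator=generator)

assert torch.equal(a, b)  # this is what makes the hard-coded expected_slice values stable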
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 119 | 1 |
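Two numeric details in the script above are easy to misread; a small sketch of the batch-size rescaling and of the last-batch truncation (the concrete numbers are illustrative):

MAX_GPU_BATCH_SIZE = 16

def rescale_batch(batch_size):
    # Same arithmetic as the script: trade per-step batch size for accumulation steps.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    return batch_size, gradient_accumulation_steps

assert rescale_batch(64) == (16, 4)  # effective batch size stays 64

# Last-batch truncation: 10 eval samples on 4 processes are padded to 12, so the
# final gathered batch must drop the 2 duplicated samples before computing metrics.
dataset_len, samples_seen = 10, 8
gathered = list(range(8, 12))  # ids of the samples in the last gathered batch
assert gathered[: dataset_len - samples_seen] == [8, 9]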
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Top-down (memoized) edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
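A few worked calls against the memoized recursion above:

assert min_distance_up_bottom("kitten", "sitting") == 3  # substitute k->s and e->i, insert g
assert min_distance_up_bottom("", "abc") == 3            # three insertions
assert min_distance_up_bottom("same", "same") == 0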
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER-style CoNLL files the label of interest is the last column by default
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CoNLL-2003 files the chunk tag is the second-to-last column
        super().__init__(label_idx=-2)
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 253 | 1 |
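The readers above consume whitespace-separated CoNLL blocks; a tiny self-contained sketch of that format and of the label-column selection (the sample lines are made up):

import io

sample = io.StringIO("EU B-NP B-ORG\nrejects B-VP O\n\nGerman B-NP B-MISC\n")
sentences, words, labels = [], [], []
for line in sample:
    if line.strip() == "":
        if words:
            sentences.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(" ")
        words.append(splits[0])
        labels.append(splits[-1].strip())  # last column, i.e. label_idx=-1 as in NER
if words:
    sentences.append((words, labels))

assert sentences == [(["EU", "rejects"], ["B-ORG", "O"]), (["German"], ["B-MISC"])]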
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return the peak of a list that first increases and then decreases."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218 |
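peak assumes the list strictly rises and then falls; a couple of worked calls:

assert peak([1, 3, 5, 9, 7, 2]) == 9
assert peak([1, 2, 3, 4, 5, 4]) == 5  # binary-search-style recursion, O(log n) comparisons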
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 218 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 196 |
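The special-token layout produced by build_inputs_with_special_tokens above, spelled out with toy ids (0 and 2 stand in for <s> and </s>; real ids come from the vocabulary):

CLS, SEP = 0, 2
ids_a, ids_b = [10, 11], [20]

single = [CLS] + ids_a + [SEP]
pair = [CLS] + ids_a + [SEP] + [SEP] + ids_b + [SEP]
assert single == [0, 10, 11, 2]
assert pair == [0, 10, 11, 2, 2, 20, 2]  # <s> A </s></s> B </s>
assert len(pair) * [0] == [0] * 7        # token_type_ids are all zeros for BARThez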
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 196 | 1 |
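Because the input must be sorted, the two indices can only close in on the target; a couple of worked calls:

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # 2 + 7 == 9
assert two_pointer([2, 7, 11, 15], 100) == []    # no pair sums to the target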
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
| 350 |
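The solution relies on the identity that an a-by-b grid contains T(a) * T(b) axis-aligned rectangles, with T(n) = n(n+1)/2; a quick check against the worked example from the problem statement:

def triangle(n: int) -> int:
    return n * (n + 1) // 2

def rectangles(a: int, b: int) -> int:
    # choose 2 of the a+1 vertical lines and 2 of the b+1 horizontal lines
    return triangle(a) * triangle(b)

assert rectangles(2, 3) == 18  # Project Euler problem 85: a 2x3 grid holds 18 rectangles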
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = crop_pct if crop_pct is not None else self.crop_pct
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
__a = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__a = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=lowerCamelCase , size=lowerCamelCase , crop_pct=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
__a = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
__a = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
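
# Illustrative sketch (not part of the original class): the crop_pct resize rule
# above can be checked with plain arithmetic. The helper name below is hypothetical.
def _expected_resize_edge(shortest_edge: int, crop_pct: float) -> int:
    # below 384 the shortest side is first scaled up to shortest_edge / crop_pct and
    # then center-cropped back down; at 384 and above the image is warped directly
    return int(shortest_edge / crop_pct) if shortest_edge < 384 else shortest_edge


assert _expected_resize_edge(224, 0.875) == 256  # the classic ImageNet eval setting
assert _expected_resize_edge(384, 0.875) == 384  # no crop at >= 384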
| 268 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    '''Timeout''',
    '''BaseFileLock''',
    '''WindowsFileLock''',
    '''UnixFileLock''',
    '''SoftFileLock''',
    '''FileLock''',
]

__version__ = '''3.0.12'''


_logger = None
def logger():
    '''simple docstring'''
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(OSError):
    '''simple docstring'''

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    '''simple docstring'''

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    '''simple docstring'''

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    '''simple docstring'''

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    '''simple docstring'''

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('''only soft file lock is available''')
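
# Illustrative usage sketch (not part of the original module). `FileLock` is the
# platform-appropriate class selected above; the thread-lock-guarded counter makes
# acquisition re-entrant, so nested `with` blocks trigger only one OS-level
# acquire/release pair. The lock-file name is hypothetical.
if __name__ == "__main__":
    example_lock = FileLock("example.txt.lock", timeout=1)
    with example_lock:      # OS-level lock taken, counter = 1
        with example_lock:  # re-entrant: counter = 2, no second OS call
            pass            # counter back to 1 on exit
    # counter is 0 here, so the OS-level lock has been released; a competing
    # process would now acquire it (or raise Timeout after ~1 second)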
| 229 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"""files""": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"""files""": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, """rb""") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
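
# Illustrative sketch (not part of the original module): `_generate_tables` simply
# round-trips pickled DataFrames through Arrow. The file name below is hypothetical.
if __name__ == "__main__":
    example_df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
    example_df.to_pickle("example.pkl")
    with open("example.pkl", "rb") as f:
        example_table = pa.Table.from_pandas(pd.read_pickle(f))
    assert example_table.num_rows == 3 and example_table.column_names == ["a", "b"]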
| 229 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
"""simple docstring"""
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute-force O(n^2) scan over every pair
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # in the strip, only points within 6 positions of each other can be closer
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # keep only points within closest_pair_dis of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
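
# Illustrative cross-check (not part of the original file): on a small input the
# divide-and-conquer answer should match a brute-force O(n^2) scan.
def _brute_force_distance(points):
    return min(
        euclidean_distance_sqr(p, q)
        for i, p in enumerate(points)
        for q in points[i + 1 :]
    ) ** 0.5


if __name__ == "__main__":
    assert abs(closest_pair_of_points(points, len(points)) - _brute_force_distance(points)) < 1e-9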
| 239 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
| 119 |
from __future__ import annotations
graph = {
    '''A''': ['''B''', '''C''', '''E'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F''', '''G'''],
    '''D''': ['''B'''],
    '''E''': ['''A''', '''B''', '''D'''],
    '''F''': ['''C'''],
    '''G''': ['''C'''],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + F"""->{target_vertex}"""


if __name__ == "__main__":
    g = Graph(graph, '''G''')
    g.breath_first_search()
    print(g.shortest_path('''D'''))
    print(g.shortest_path('''G'''))
    print(g.shortest_path('''Foo'''))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=26_7735,
        cutoffs=[2_0000, 4_0000, 20_0000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f'The model {self.model_type} is one of the few models that has no sequence length limit.')
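
# Illustrative sketch (not part of the original file): `attribute_map` lets the
# generic config names resolve to the TransfoXL-specific attributes.
if __name__ == "__main__":
    example_config = TransfoXLConfig(n_layer=2, d_model=128, n_head=4, d_head=32)
    assert example_config.hidden_size == example_config.d_model == 128
    assert example_config.num_hidden_layers == example_config.n_layer == 2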
| 32 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""emoji_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.emoji_file, """w""") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
@property
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def __magic_name__ ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim
@property
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
__a =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__a =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__a =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__a ={
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
__a =UnCLIPTextProjModel(**__snake_case )
return model
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
__a ={
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
__a =UNetaDConditionModel(**__snake_case )
return model
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__a =UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__a =UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =self.dummy_decoder
__a =self.dummy_text_proj
__a =self.dummy_text_encoder
__a =self.dummy_tokenizer
__a =self.dummy_super_res_first
__a =self.dummy_super_res_last
__a =UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
__a =UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
__a =CLIPImageProcessor(crop_size=32 , size=32 )
__a =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __magic_name__ ( self , __snake_case , __snake_case=0 , __snake_case=True ) -> Union[str, Any]:
'''simple docstring'''
__a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('mps' ):
__a =torch.manual_seed(__snake_case )
else:
__a =torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
__a =input_image * 0.5 + 0.5
__a =input_image.clamp(0 , 1 )
__a =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__a =DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
__a =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =pipe(**__snake_case )
__a =output.images
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =pipe(
**__snake_case , return_dict=__snake_case , )[0]
__a =image[0, -3:, -3:, -1]
__a =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a =np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
__a =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =pipe(**__snake_case )
__a =output.images
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =pipe(
**__snake_case , return_dict=__snake_case , )[0]
__a =image[0, -3:, -3:, -1]
__a =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a =np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> List[str]:
'''simple docstring'''
__a ='cpu'
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
__a =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =[
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__a =pipe(**__snake_case )
__a =output.images
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =[
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__a =pipe(
**__snake_case , return_dict=__snake_case , )[0]
__a =image[0, -3:, -3:, -1]
__a =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__a =np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
__a =self.get_dummy_components()
__a =self.pipeline_class(**__snake_case )
__a =pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__a =torch.Generator(device=__snake_case ).manual_seed(0 )
__a =pipe.decoder.dtype
__a =1
__a =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__a =pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
__a =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__a =pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
__a =pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
__a =self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
# Don't pass image, instead pass embedding
__a =pipeline_inputs.pop('image' )
__a =pipe.image_encoder(__snake_case ).image_embeds
__a =pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
__a =torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__a =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =torch_device == 'cpu'
__a =True
__a =[
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
__a =[
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__a =[2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __magic_name__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> str:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
__a =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
__a =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
__a =UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
__a =pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__a =torch.Generator(device='cpu' ).manual_seed(0 )
__a =pipeline(
__snake_case , generator=__snake_case , output_type='np' , )
__a =output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__snake_case , __snake_case , 15 )
| 218 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class __magic_name__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
    def test_inference_image_variations(self):
        '''simple docstring'''
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 218 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 198 |
from manim import *
class UpperCamelCase ( Scene ):
    def construct( self ):
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("CPU" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("GPU" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("Model" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
A__ = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCAmelCase__ , buff=0.0 )
self.add(UpperCAmelCase__ )
cpu_targs.append(UpperCAmelCase__ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
A__ = Text("Loaded Checkpoint" , font_size=24 )
A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , aligned_edge=UpperCAmelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__ , UpperCAmelCase__ )
A__ = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
A__ = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )
self.play(Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
A__ = []
A__ = []
for i, rect in enumerate(UpperCAmelCase__ ):
A__ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 )
target.move_to(UpperCAmelCase__ )
first_animations.append(GrowFromCenter(UpperCAmelCase__ , run_time=1 ) )
A__ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(*UpperCAmelCase__ )
self.wait()
| 198 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = """new-model"""


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[int] = "bert-base-cased"
a__: Dict = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Tuple = TFAutoModel.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: str = "bert-base-cased"
a__: Any = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Dict = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: List[str] = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Optional[Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_)
a__: Dict = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase_ , output_loading_info=lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Any = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: List[Any] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_)
a__: Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase_ , output_loading_info=lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: int = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Any = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_)
a__: List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase_ , output_loading_info=lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: int = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
@slow
@require_tensorflow_probability
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a__: Dict = AutoConfig.from_pretrained(lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: int = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase_)
a__: List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_)
self.assertIsNotNone(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: str = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase_) , 1_44_10)
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Union[str, Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
a__: Any = copy.deepcopy(model.config)
a__: Dict = ["FunnelBaseModel"]
a__: Union[str, Any] = TFAutoModel.from_config(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_)
a__: Any = TFAutoModel.from_pretrained(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
try:
AutoConfig.register('new-model' , lowerCAmelCase_)
a__: Union[str, Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(lowerCAmelCase_):
auto_class.register(lowerCAmelCase_ , lowerCAmelCase_)
auto_class.register(lowerCAmelCase_ , lowerCAmelCase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_):
auto_class.register(lowerCAmelCase_ , lowerCAmelCase_)
# Now that the config is registered, it can be used as any other config with the auto-API
a__: List[Any] = BertModelTester(self).get_config()
a__: Union[str, Any] = NewModelConfig(**tiny_config.to_dict())
a__: int = auto_class.from_config(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase_)
a__: Dict = auto_class.from_pretrained(lowerCAmelCase_)
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier'):
a__: str = TFAutoModel.from_pretrained('bert-base')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a__: Union[str, Any] = TFAutoModel.from_pretrained(lowerCAmelCase_ , revision='aaaaaa')
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase_ , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
a__: List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
with self.assertRaisesRegex(lowerCAmelCase_ , 'Use `from_pt=True` to load this model'):
a__: Union[str, Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: List[Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
a__: str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
a__: Dict = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
a__: int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 290 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 268 | 0 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
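
# Illustrative cross-check (not part of the original file): the backtracking
# search above should agree with itertools.combinations, in the same order.
from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]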
| 350 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ : Optional[int] = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # bits that generate a carry
        first ^= second  # sum without the carry
        second = carry << 1  # carry shifted into position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 112 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they consist of the same characters."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 239 | 0 |
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 369 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepares a list of PIL images, or numpy arrays if `numpify=True`, or PyTorch tensors if `torchify=True`."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra channel is dropped on conversion to RGB)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 208 | 0 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
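
# Usage sketch (not part of the pipeline file): "google/ncsnpp-ffhq-256" is one
# publicly available SDE-VE checkpoint; any compatible UNet2DModel plus
# ScoreSdeVeScheduler pair works the same way.
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-ffhq-256")
# image = pipe(num_inference_steps=2000).images[0]
# image.save("sde_ve_sample.png")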
| 32 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
UpperCAmelCase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 32 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's algorithm: grow a minimum spanning tree from vertex 0."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
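
# Worked example (sketch): a weighted triangle with edges 0-1 (weight 1),
# 1-2 (weight 2), 0-2 (weight 3). The MST keeps the two lightest edges:
# adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
# prisms_algorithm(adjacency_list) -> [(0, 1), (1, 2)]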
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 192 | 0 |
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
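
# Alternative (a sketch, not in the original file): str.maketrans builds the
# substitution table once, which is usually faster than either version above.
ATBASH_TABLE = str.maketrans(
    string.ascii_letters, string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
)


def atbash_table(sequence: str) -> str:
    return sequence.translate(ATBASH_TABLE)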
| 198 |
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 198 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of the dependency versions, using the exact same syntax used by pip."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
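
# Typical calls (sketch):
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # version range with an exclusion
# require_version("python>=3.8")                        # interpreter check via the special case
# require_version_core("datasets>=1.8.0")               # same check, with the core hint appended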
| 279 |
def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
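
# Sanity checks (sketch): -5 needs 4 bits (1011), and 0 is its own complement.
assert twos_complement(-5) == "0b1011"
assert twos_complement(0) == "0b0"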
| 279 | 1 |
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 43 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to int 1 (true) or int 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with dict-like plus tuple-like access."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument, useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument, useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose`."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model without using isinstance(), to avoid importing every framework."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
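
# Usage sketch (hypothetical subclass, not part of the module): ModelOutput gives
# dataclass fields dict-style, attribute and tuple-index access, skipping None
# entries in to_tuple().
# from dataclasses import dataclass
# from typing import Optional
#
# @dataclass
# class SampleOutput(ModelOutput):
#     loss: Optional[float] = None
#     logits: Optional[List[int]] = None
#
# out = SampleOutput(logits=[1, 2])
# out.logits == out["logits"] == out[0]  # True; `loss` is dropped from to_tuple()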
| 273 |
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 273 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
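
# Usage sketch: the default arguments mirror the "tiny" variant's stage layout.
# config = ConvNextV2Config()
# config.hidden_sizes -> [96, 192, 384, 768]
# config.stage_names  -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']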
| 80 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
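
# Example (sketch): three points on the line x = y = z are collinear, while
# perturbing one coordinate breaks collinearity.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))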
| 208 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ = '''src/diffusers'''
UpperCamelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ = spec.loader.load_module()
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any ):
return line.startswith(SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , SCREAMING_SNAKE_CASE_ ) is not None
def find_code_in_diffusers(object_name: str):
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, F"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")
    with open(os.path.join(DIFFUSERS_PATH, F"""{module}.py"""), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(RF"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(F""" {object_name} does not match any function or class in {module}.""")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
UpperCamelCase__ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
UpperCamelCase__ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
UpperCamelCase__ = re.compile(R"""<FILL\s+[^>]*>""")
def get_indent(code: str):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = F"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=1_19, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(F"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja, objb, option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code)
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(F"""Detected changes, rewriting {filename}.""")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCamelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
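As a quick illustration of the two small helpers above (kept as comments so the script's behavior is unchanged; output assumes `black` is installed and can vary slightly across black versions):

# print(repr(get_indent("    x = 1\n")))  # -> '    '
# print(blackify("x=1\n"))               # -> 'x = 1\n'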
| 368 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForMaskedImageModeling(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForMaskedImageModeling(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFDeiTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
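A condensed inference sketch distilled from the integration test above; it assumes the same public checkpoint and fixture image are available locally.

import tensorflow as tf
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(int(tf.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id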
| 102 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only : bool = True , *args , **kwargs ):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
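Usage sketch of the wrapper above: on the main process it behaves like plain tqdm, while on other ranks the bar is disabled so multi-process logs stay clean.

if __name__ == "__main__":
    for _ in tqdm(True, range(3), desc="steps"):  # first positional arg is main_process_only
        pass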
| 213 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class MaMaaaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def __A ( self ):
super().setUp()
A__ : Union[str, Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
A__ : Optional[Any] = dict(zip(A__ , range(len(A__ ) ) ) )
A__ : Optional[int] = Path(self.tmpdirname )
save_json(A__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(A__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
A__ : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , **A__ ):
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **A__ )
def __A ( self , A__ ):
return (
"This is a test",
"This is a test",
)
def __A ( self ):
A__ : Any = """</s>"""
A__ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def __A ( self ):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<s>""" )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def __A ( self ):
pass
def __A ( self ):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        text = tokenizer.convert_tokens_to_string(back_tokens )
        self.assertEqual(text , """This is a test""" )
@slow
def __A ( self ):
# fmt: off
A__ : int = {"""input_ids""": [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class MaMaaaTokenizerIntegrationTest(unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
        cls.pad_token_id = 1
        return cls
def __A ( self ):
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 12_8063 )
def __A ( self ):
A__ : Optional[Any] = self.tokenizer.get_vocab()
self.assertEqual(len(A__ ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , A__ )
def __A ( self ):
A__ : List[Any] = """en"""
A__ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A__ )
def __A ( self ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
# fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
# fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
def __A ( self ):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = MaMaaaTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_token_to_id )
@require_torch
def __A ( self ):
A__ : List[str] = """en"""
A__ : List[str] = """fr"""
A__ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A__ , return_tensors="""pt""" )
A__ : int = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
A__ : Any = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __A ( self ):
A__ : List[str] = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
A__ : Any = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __A ( self ):
A__ : Optional[int] = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
A__ : Any = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __A ( self ):
A__ : Any = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(A__ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[12_8022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 12_8006,
} , )
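For context, a minimal end-to-end translation sketch with the checkpoint exercised above, using the public transformers class names (a model download is required):

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
batch = tokenizer("Hello world", return_tensors="pt")
# Force the first generated token to be the target-language code.
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))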
| 192 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase):
_UpperCamelCase:List[str] = ViTImageProcessor if is_vision_available() else None
@property
def _snake_case ( self )-> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self )-> Any:
lowerCamelCase_ =(3, 32, 128)
lowerCamelCase_ =tempfile.mkdtemp()
# fmt: off
lowerCamelCase_ =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCamelCase_ =dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
lowerCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
lowerCamelCase_ ={
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
lowerCamelCase_ =os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )-> Dict:
        image_input = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase_ =self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowerCamelCase_ =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
lowerCamelCase_ =processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self )-> int:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""test"""
lowerCamelCase_ =processor(text=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ="""test"""
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ =processor.char_decode(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =[seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> List[Any]:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =None
lowerCamelCase_ =self.prepare_image_inputs()
lowerCamelCase_ =processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _snake_case ( self )-> Tuple:
lowerCamelCase_ =self.get_image_processor()
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =torch.randn(1 , 27 , 38 )
lowerCamelCase_ =torch.randn(1 , 27 , 5_0257 )
lowerCamelCase_ =torch.randn(1 , 27 , 3_0522 )
lowerCamelCase_ =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
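A short end-to-end usage sketch of the processor with the public MGP-STR model; the checkpoint name is an assumption and a download is required.

from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")  # assumed public checkpoint
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
pixel_values = processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode fuses the char/bpe/wordpiece heads into a final transcription.
print(processor.batch_decode(outputs.logits)["generated_text"])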
| 361 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def _snake_case ( self )-> Optional[int]:
super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self )-> Any:
lowerCamelCase_ ="""<s>"""
lowerCamelCase_ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )-> Union[str, Any]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
def _snake_case ( self )-> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self )-> Any:
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def _snake_case ( self )-> str:
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ ="""Hello World!"""
lowerCamelCase_ =[1_8536, 2260, 101]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@slow
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowerCamelCase_ =[
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(_SCREAMING_SNAKE_CASE ) )
@require_torch
@slow
def _snake_case ( self )-> Any:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase_ =list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase_ =""" """.join(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.big_tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =BertGenerationConfig()
lowerCamelCase_ =BertGenerationEncoder(_SCREAMING_SNAKE_CASE )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_SCREAMING_SNAKE_CASE )
model(**_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self )-> int:
# fmt: off
lowerCamelCase_ ={"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
| 49 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] = "." ) -> List[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_A ):
lowerCAmelCase_ : Dict = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_A )[1] in (".py", ".ipynb"):
yield os.path.join(_A , _A ).lstrip('./' )
def UpperCamelCase_ ( lowerCAmelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
return f"{i * ' '}*" if i else "\n##"
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_A ) or old_parts[i] != new_part) and new_part:
print(f"{md_prefix(_A )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def UpperCamelCase_ ( lowerCAmelCase__ : int = "." ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : Tuple = ''
for filepath in sorted(good_file_paths(_A ) ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[Any] = os.path.split(_A )
if filepath != old_path:
lowerCAmelCase_ : Optional[int] = print_path(_A , _A )
lowerCAmelCase_ : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase_ : int = f"{filepath}/{filename}".replace(' ' , '%20' )
lowerCAmelCase_ : Dict = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f"{md_prefix(_A )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md(""".""")
| 224 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    args = Namespace(**checkpoint['''cfg''']['''model'''] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    load_result = model.load_state_dict(state_dict , strict=False )
    print(load_result )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A = parser.parse_args()
_A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
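A typical use of this script; the paths below are placeholders.

# CLI: python convert_xglm_checkpoint.py /path/to/fairseq/model.pt /path/to/output_dir
# or programmatically:
# model = convert_fairseq_xglm_checkpoint_from_disk("/path/to/fairseq/model.pt")
# model.save_pretrained("/path/to/output_dir")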
| 278 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
'''simple docstring'''
def __init__( self : Dict ,_snake_case : Dict ,_snake_case : Optional[int]=13 ,_snake_case : Optional[int]=32 ,_snake_case : Union[str, Any]=3 ,_snake_case : List[str]=4 ,_snake_case : List[Any]=[10, 20, 30, 40] ,_snake_case : Optional[int]=[2, 2, 3, 2] ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Dict=37 ,_snake_case : Optional[int]="gelu" ,_snake_case : Union[str, Any]=10 ,_snake_case : List[Any]=0.02 ,_snake_case : List[Any]=["stage2", "stage3", "stage4"] ,_snake_case : Any=[2, 3, 4] ,_snake_case : str=None ,) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = parent
lowercase__ : str = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Union[str, Any] = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : int = hidden_sizes
lowercase__ : Dict = depths
lowercase__ : Optional[int] = is_training
lowercase__ : Any = use_labels
lowercase__ : Optional[int] = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : int = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : List[str] = out_features
lowercase__ : Optional[int] = out_indices
lowercase__ : int = scope
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
lowercase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_snake_case ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Dict ,_snake_case : Any ,_snake_case : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = ConvNextVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Any = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : Optional[int] ,_snake_case : Dict ,_snake_case : List[str] ) -> str:
"""simple docstring"""
lowercase__ : Tuple = ConvNextVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : int ,_snake_case : Any ,_snake_case : List[Any] ,_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = ConvNextVaBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : Dict = None
lowercase__ : Any = ConvNextVaBackbone(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : str = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Tuple = False
lowerCAmelCase : Dict = False
lowerCAmelCase : Dict = False
lowerCAmelCase : str = False
lowerCAmelCase : Tuple = False
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ConvNextVaConfig ,has_text_modality=False ,hidden_size=37 )
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Any = True
if model_class.__name__ in [
*get_values(_snake_case ),
*get_values(_snake_case ),
]:
continue
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.train()
lowercase__ : List[Any] = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
lowercase__ : Dict = model(**_snake_case ).loss
loss.backward()
def UpperCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Union[str, Any] = False
lowercase__ : List[Any] = True
if (
model_class.__name__
in [*get_values(_snake_case ), *get_values(_snake_case )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : Tuple = self._prepare_for_class(_snake_case ,_snake_case ,return_labels=_snake_case )
lowercase__ : Optional[int] = model(**_snake_case ).loss
loss.backward()
def UpperCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(_snake_case )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Tuple ,_snake_case : int ,_snake_case : Any ):
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : List[Any] = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = ConvNextVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image ,return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1e-4 ) )
| 302 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 302 | 1 |
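# --- Editor's note: a minimal, self-contained sketch of the lazy-import idea the
# --- `_LazyModule` file above relies on. This is a hypothetical demo, not the
# --- transformers implementation: attribute access triggers the real import, so
# --- importing the package itself stays cheap.
import importlib
import sys
import types


class TinyLazyModule(types.ModuleType):
    """Resolve attributes to real modules only on first access."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # exported attribute -> module that defines it

    def __getattr__(self, attr):
        module_name = self._attr_to_module[attr]
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = TinyLazyModule("lazy_demo", {"dumps": "json", "sqrt": "math"})
sys.modules["lazy_demo"] = lazy
print(lazy.dumps({"ok": True}))  # json is imported only at this point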
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    # the following common tests do not apply to UnCLIPScheduler
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 273 |
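# --- Editor's note: a hedged, self-contained sketch of the sampling loop the
# --- full-loop tests above exercise. The "model" here is a stand-in that
# --- predicts zero noise so the snippet runs offline; a real pipeline would
# --- call a trained UNet instead.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=10, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.abs().mean())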
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 273 | 1 |
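# --- Editor's note: hedged sketch of the feature the tests above exercise:
# --- `interpolate_pos_encoding=True` lets ViT accept image sizes other than the
# --- one it was trained on. Illustrative only -- weights download on first run.
import tensorflow as tf
from transformers import TFViTModel

model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
pixels = tf.random.uniform((1, 3, 112, 112))  # channels-first, 112 != 224
out = model(pixels, interpolate_pos_encoding=True)
print(out.last_hidden_state.shape)  # (1, (112/16)**2 + 1, 768) = (1, 50, 768)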
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__UpperCamelCase = logging.getLogger()
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = {}
snake_case_ = os.path.join(UpperCAmelCase , 'all_results.json' )
if os.path.exists(UpperCAmelCase ):
with open(UpperCAmelCase , 'r' ) as f:
snake_case_ = json.load(UpperCAmelCase )
else:
raise ValueError(f'can\'t find {path}' )
return results
__UpperCamelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class UpperCamelCase ( lowerCAmelCase__ ):
def a_ ( self) -> List[str]:
import xla_spawn
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
snake_case_ = time()
xla_spawn.main()
snake_case_ = time()
snake_case_ = get_results(lowerCAmelCase__)
self.assertGreaterEqual(result['eval_accuracy'], 0.75)
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start, 500)
def a_ ( self) -> Dict:
import xla_spawn
snake_case_ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
xla_spawn.main()
| 312 |
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312 | 1 |
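# --- Editor's note: quick worked check of minimum_tickets_cost as fixed above
# --- (values verified by hand): a 1-day pass for day 1 (cost 2), a 7-day pass
# --- covering days 4-8 (cost 7), and a 1-day pass for day 20 (cost 2) -> 11,
# --- which beats six singles (12) or one 30-day pass (15).
assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11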
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 60 |
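# --- Editor's note: hedged usage sketch for the auto classes registered above;
# --- from_pretrained downloads weights on first call, so this is illustrative
# --- rather than a test.
from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")
print(type(model).__name__)  # resolves to FlaxBertModel via FLAX_MODEL_MAPPING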
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE : List[Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def lowercase ( _snake_case : Optional[int] , _snake_case : Optional[int] ) ->Tuple:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def lowercase ( _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=_snake_case )
def lowercase ( _snake_case : Optional[Any] , _snake_case : Dict ) ->Any:
"""simple docstring"""
__snake_case : List[Any] = tmp_path_factory.getbasetemp() / '''cache'''
__snake_case : int = test_hf_cache_home / '''datasets'''
__snake_case : Tuple = test_hf_cache_home / '''metrics'''
__snake_case : List[str] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_snake_case ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_snake_case ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_snake_case ) )
__snake_case : Optional[int] = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_snake_case ) )
__snake_case : Tuple = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_snake_case ) )
@pytest.fixture(autouse=_snake_case , scope='''session''' )
def lowercase ( ) ->Any:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_snake_case )
def lowercase ( _snake_case : Tuple ) ->Union[str, Any]:
"""simple docstring"""
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _snake_case )
@pytest.fixture
def lowercase ( _snake_case : Any ) ->Optional[Any]:
"""simple docstring"""
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _snake_case )
| 102 | 0 |
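# --- Editor's note: a hedged sketch of what the autouse cache fixture above buys
# --- a test: datasets' cache paths point inside pytest's tmp dir, so tests never
# --- touch the real ~/.cache. The test name here is illustrative.
import datasets.config


def test_cache_is_sandboxed():
    assert "cache" in str(datasets.config.HF_DATASETS_CACHE)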
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 63 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 63 | 1 |
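# --- Editor's note: an illustration of the XLNet special-token layout the
# --- methods above implement -- unlike BERT, <sep>/<cls> go at the *end* of the
# --- sequence. The ids below are placeholders for the demo, not real vocab ids.
ids_a, ids_b = [10, 11], [20, 21, 22]
sep, cls = [4], [3]
print(ids_a + sep + ids_b + sep + cls)  # pair: A <sep> B <sep> <cls>
print(len(ids_a + sep) * [0] + [2])     # token type ids for a single sequence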
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = list[tuple[int, int]]
lowerCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCAmelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowerCamelCase :
def __init__(self , __a , __a , __a , __a , __a , __a , ) -> Union[str, Any]:
UpperCamelCase = pos_x
UpperCamelCase = pos_y
UpperCamelCase = (pos_y, pos_x)
UpperCamelCase = goal_x
UpperCamelCase = goal_y
UpperCamelCase = g_cost
UpperCamelCase = parent
UpperCamelCase = self.calculate_heuristic()
def snake_case_ (self ) -> Tuple:
UpperCamelCase = abs(self.pos_x - self.goal_x )
UpperCamelCase = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__(self , __a ) -> List[Any]:
return self.f_cost < other.f_cost
class _lowerCamelCase :
def __init__(self , __a , __a ) -> Dict:
UpperCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __SCREAMING_SNAKE_CASE )
UpperCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __SCREAMING_SNAKE_CASE )
UpperCamelCase = [self.start]
UpperCamelCase = []
UpperCamelCase = False
def snake_case_ (self ) -> Optional[int]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCamelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
UpperCamelCase = True
return self.retrace_path(__SCREAMING_SNAKE_CASE )
self.closed_nodes.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase = self.get_successors(__SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(__SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(__SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def snake_case_ (self , __a ) -> List[str]:
UpperCamelCase = []
for action in delta:
UpperCamelCase = parent.pos_x + action[1]
UpperCamelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __SCREAMING_SNAKE_CASE , ) )
return successors
def snake_case_ (self , __a ) -> Any:
UpperCamelCase = node
UpperCamelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCAmelCase__ = (0, 0)
lowerCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
lowerCAmelCase__ = GreedyBestFirst(init, goal)
lowerCAmelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCAmelCase__ = 2
for elem in grid:
print(elem)
| 153 |
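# --- Editor's note: the search above re-sorts open_nodes on every iteration
# --- (O(n log n) per pop); a binary heap gets pop-min down to O(log n).
# --- Minimal runnable sketch of the substitution with a stand-in node type:
import heapq
from dataclasses import dataclass, field


@dataclass(order=True)
class HeapNode:
    f_cost: int
    pos: tuple = field(compare=False)


open_heap = [HeapNode(5, (0, 0)), HeapNode(2, (1, 1))]
heapq.heapify(open_heap)
print(heapq.heappop(open_heap).pos)  # (1, 1): lowest f_cost comes out first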
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 49 | 0 |
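# --- Editor's note: typical invocation of the conversion script above; the
# --- script filename and all paths are placeholders for illustration.
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert/pytorch_model.bin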
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()


class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RWKV model."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
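

# A minimal usage sketch (assumption: this is the config transformers exposes as
# `RwkvConfig`, which the defaults above match). The `attribute_map` declared on the
# class makes `max_position_embeddings` resolve to `context_length`.
if __name__ == "__main__":
    config = RwkvConfig(context_length=2_048)
    print(config.max_position_embeddings)  # -> 2048, via the attribute_map alias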
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a = text_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__a = [text_path]
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str=("train",) ):
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for split in splits:
__a = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__a = TextDatasetReader({"""train""": text_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = tmp_path / """cache"""
# The "text" column defaults to the "string" dtype unless features override it
__a = {"""text""": """string"""}
__a = features.copy() if features else default_expected_features
__a = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
__a = TextDatasetReader({"""train""": text_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if split:
__a = {split: text_path}
else:
__a = """train"""
__a = {"""train""": text_path, """test""": text_path}
__a = tmp_path / """cache"""
__a = {"""text""": """string"""}
__a = TextDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
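

# A minimal sketch of the reader API exercised by the tests above (the path below is
# hypothetical): each line of the text file becomes one row of a single "text" column.
if __name__ == "__main__":
    dataset = TextDatasetReader("data/train.txt", cache_dir="cache").read()
    print(dataset.num_rows, dataset.column_names)  # e.g. 4 ['text']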
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
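

# A minimal sanity check for the ONNX export config above (assumption: the classes
# are exposed as `AlbertConfig`/`AlbertOnnxConfig`). No weights are needed: the
# config only declares which input axes are dynamic.
if __name__ == "__main__":
    onnx_config = AlbertOnnxConfig(AlbertConfig())
    print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes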
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ):
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Union[str, Any] = scope
UpperCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = num_patches + 2
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFDeiTModel(config=A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = TFDeiTForMaskedImageModeling(config=A_ )
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[Any] = TFDeiTForMaskedImageModeling(A_ )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.type_sequence_label_size
UpperCamelCase : List[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCAmelCase :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCAmelCase :Dict = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Optional[int] = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = TFDeiTModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Dense ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(A_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
UpperCamelCase : str = model(**A_ )
# verify the logits
UpperCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : Tuple = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
| 359 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowerCamelCase : Optional[int] = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
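

# A minimal usage sketch: re-check a single pinned dependency on demand and raise
# with the given hint if the installed version falls outside the range in `deps`
# (assumption: "numpy" is a key of the dependency table imported above).
if __name__ == "__main__":
    dep_version_check("numpy", hint="try: pip install -U numpy")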
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below limit that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
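

# Worked example: 585 qualifies, since str(585) == "585" and
# bin(585) == "0b1001001001" are both palindromes.
if __name__ == "__main__":
    assert is_palindrome(585)
    assert is_palindrome(bin(585).split("b")[1])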
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Remove duplicate entries from the model doc ToC and sort it by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
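

# A minimal illustration of clean_model_doc_toc on a toy table of contents:
# the duplicated "bert" entry collapses to one and entries sort by title.
if __name__ == "__main__":
    toy_toc = [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
    print(clean_model_doc_toc(toy_toc))
    # -> [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]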
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
A__ : List[str] =["""pixel_values"""]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : bool = True , **UpperCAmelCase_ : List[Any] , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = size if size is not None else {'height': 384, 'width': 384}
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE__ = do_convert_rgb
def A_ ( self : Any , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE__ = (size['height'], size['width'])
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ):
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : str , ):
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : str , ):
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE__ = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
SCREAMING_SNAKE_CASE__ = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCAmelCase_ )
return encoded_outputs
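

# A minimal usage sketch. Assumption: these defaults (384x384 resize, OPENAI CLIP
# mean/std, RGB conversion) match the processor transformers ships as
# `BlipImageProcessor`, used here as a stand-in.
if __name__ == "__main__":
    import numpy as np
    from transformers import BlipImageProcessor

    processor = BlipImageProcessor()
    dummy = PIL.Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 384, 384)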
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # builds the sample tree:  1 -> (2 -> (4, 5), 3)
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
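

# Expected results for the 5-node sample tree built by make_tree() above.
if __name__ == "__main__":
    sample = make_tree()
    assert preorder(sample) == [1, 2, 4, 5, 3]
    assert inorder(sample) == [4, 2, 5, 1, 3]
    assert postorder(sample) == [4, 5, 2, 3, 1]
    assert list(level_order(sample)) == [1, 2, 3, 4, 5]
    assert zigzag(sample) == [[1], [3, 2], [4, 5]]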
def harmonic_series(n_term: str) -> list:
    """Return the first n_term terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
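

# Example: the first five terms produced by harmonic_series above.
if __name__ == "__main__":
    assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]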
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution, row-wise over the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
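

# Quick sanity check for entropy(): a uniform distribution over four outcomes
# has entropy ln(4) ~= 1.3863 per row.
if __name__ == "__main__":
    print(entropy(torch.full((1, 4), 0.25)))  # tensor([1.3863])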
def print_ad_tensor(tensor):
    """Print a 2D tensor (layers x heads) row by row."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def lowerCamelCase__ ( a , a , a , a=True , a=True , a=None , a=False ) -> List[Any]:
_A , _A: str = model.config.num_hidden_layers, model.config.num_attention_heads
_A: List[str] = torch.zeros(a , a ).to(args.device )
_A: int = torch.zeros(a , a ).to(args.device )
if head_mask is None:
_A: Optional[Any] = torch.ones(a , a ).to(args.device )
head_mask.requires_grad_(requires_grad=a )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_A: List[Any] = None
_A: str = 0.0
_A: int = 0.0
for step, inputs in enumerate(tqdm(a , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_A: List[str] = tuple(t.to(args.device ) for t in inputs )
((_A) , ): Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_A: List[str] = model(a , labels=a , head_mask=a )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_A , _A , _A: int = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(a ):
_A: Optional[Any] = entropy(attn.detach() , a )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(a ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_A: Optional[Any] = 2
_A: Any = torch.pow(torch.pow(a , a ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_A: Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(a )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(a )
logger.info('''Head ranked by importance scores''' )
_A: Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_A: List[Any] = torch.arange(
head_importance.numel() , device=args.device )
_A: Dict = head_ranks.view_as(a )
print_ad_tensor(a )
return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( a , a , a ) -> List[Any]:
_A , _A , _A: Optional[Any] = compute_heads_importance(a , a , a , compute_entropy=a )
_A: List[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , a , original_score * args.masking_threshold )
_A: Tuple = torch.ones_like(a )
_A: Dict = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_A: List[Any] = original_score
while current_score >= original_score * args.masking_threshold:
_A: str = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_A: List[Any] = float('''Inf''' )
_A: Optional[Any] = head_importance.view(-1 ).sort()[1]
if len(a ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_A: List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_A: List[Any] = new_head_mask.view(-1 )
_A: Dict = 0.0
_A: List[str] = new_head_mask.view_as(a )
_A: List[Any] = new_head_mask.clone().detach()
print_ad_tensor(a )
# Compute metric and head importance again
_A , _A , _A: Dict = compute_heads_importance(
a , a , a , compute_entropy=a , head_mask=a )
_A: List[str] = 1 / loss
logger.info(
'''Masking: current score: %f, remaining heads %d (%.1f percents)''' , a , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('''Final head mask''' )
print_ad_tensor(a )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( a , a , a , a ) -> Union[str, Any]:
_A: List[Any] = datetime.now()
_A , _A , _A: Optional[int] = compute_heads_importance(
a , a , a , compute_entropy=a , compute_importance=a , head_mask=a )
_A: List[Any] = 1 / loss
_A: List[Any] = datetime.now() - before_time
_A: Optional[int] = sum(p.numel() for p in model.parameters() )
_A: List[str] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(a ) )
}
for k, v in heads_to_prune.items():
if isinstance(a , a ):
_A: Union[str, Any] = [
v,
]
assert sum(len(a ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(a )
_A: Dict = sum(p.numel() for p in model.parameters() )
_A: Tuple = datetime.now()
_A , _A , _A: List[Any] = compute_heads_importance(
a , a , a , compute_entropy=a , compute_importance=a , head_mask=a , actually_pruned=a , )
_A: str = 1 / loss
_A: List[str] = datetime.now() - before_time
logger.info(
'''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , a , a , pruned_num_params / original_num_params * 1_00 , )
logger.info('''Pruning: score with masking: %f score with pruning: %f''' , a , a )
logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 1_00 )
save_model(a , args.output_dir )
def lowerCamelCase__ ( ) -> int:
_A: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a , type=a , required=a , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a , type=a , required=a , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=a , type=a , required=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=a , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=a , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=a , type=a , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=a , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=a , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=a , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=a , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=a , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=a , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=a , default=42 )
parser.add_argument('''--local_rank''' , type=a , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
_A: Any = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_A: Optional[int] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_A: Union[str, Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_A: Optional[int] = torch.device('''cuda''' , args.local_rank )
_A: List[Any] = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_A: List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_A: str = nn.parallel.DistributedDataParallel(
a , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=a )
elif args.n_gpu > 1:
_A: Optional[int] = nn.DataParallel(a )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=a )
torch.save(a , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , a )
# Prepare dataset
_A: str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_A: Tuple = (torch.from_numpy(a ),)
_A: Optional[Any] = TensorDataset(*a )
_A: Any = RandomSampler(a )
_A: Optional[Any] = DataLoader(a , sampler=a , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(a , a , a )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_A: Optional[Any] = mask_heads(a , a , a )
prune_heads(a , a , a , a )
if __name__ == "__main__":
main()
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
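
# The same check can also be invoked programmatically (the path below is hypothetical):
#
#     onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)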
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """Helper mirroring dataclasses.field for list-valued defaults."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(value ) ->bool:
    """simple docstring"""
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float(value ) ->bool:
    """simple docstring"""
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
"""simple docstring"""
    def __init__( self , args ):
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = float(row['result'] )
    def plot( self ):
        '''simple docstring'''
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array , y_axis_array , label=F"{label_model_name} - {inner_loop_label}: {inner_loop_value}" )
                plt.plot(x_axis_array , y_axis_array , '--' )
                title_str += F" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main() ->None:
    """simple docstring"""
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
plot.plot()
if __name__ == "__main__":
main()
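
# For reference, a minimal CSV that Plot can parse; the column names come from
# the DictReader lookups in __init__, and the values below are made up:
#   model,batch_size,sequence_length,result
#   bert-base,8,128,1432
#   bert-base,8,512,5120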
| 293 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
'''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
'''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(len(vocab_keys ) , 1_0_0_8 )
    def test_vocab_size( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
    def test_full_tokenizer( self ):
'''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
    def test_picklable_without_disk( self ):
'''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenization_base_easy_symbols( self ):
'''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
'''simple docstring'''
        symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
        original_tokenizer_encodings = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self ):
'''simple docstring'''
        expected_encoding = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='facebook/xglm-564M' , padding=False , )
| 293 | 1 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    """simple docstring"""
    offline_runners = []
    cmd = (
        F"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )
    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(F"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str( values ):
        """simple docstring"""
        return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
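
# For reference, a sketch of the JSON shape get_runner_status expects from the
# GitHub API (fields trimmed, values made up):
#   {"runners": [{"name": "ci-runner-1", "status": "offline"}, ...]}
# Only runners whose "name" is in --target_runners and whose "status" is
# "offline" end up in offline_runners.txt.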
| 52 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file ,'''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file ,'''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self ,tokenizer ):
return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
    def test_padding( self ,max_length=1_5 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError ,tokenizer_r.encode ,s ,max_length=max_length ,padding='''max_length''' )
                # Simple input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,s ,max_length=max_length ,padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,s2 ,max_length=max_length ,padding='''max_length''' ,)
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode ,p ,max_length=max_length ,padding='''max_length''' )
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,p ,max_length=max_length ,padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,p2 ,max_length=max_length ,padding='''max_length''' ,)
    def test_padding_different_model_input_name( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy (OpenAIGPTTokenizationTest ):
"""simple docstring"""
pass
| 52 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        '''simple docstring'''
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
'''simple docstring'''
return self.indices == other.indices
    def __getitem__( self , idx ):
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> List[str]:
'''simple docstring'''
return len(self.symbols )
    def __contains__( self , sym ):
'''simple docstring'''
return sym in self.indices
    @classmethod
    def load( cls , f ):
        '''simple docstring'''
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        '''simple docstring'''
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        '''simple docstring'''
        return 0
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
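
# Minimal usage sketch of the fairseq-style Dictionary above: the four special
# tokens take ids 0-3 in insertion order, and new symbols are appended after them.
_toy_dict = Dictionary()
assert (_toy_dict.bos_index, _toy_dict.pad_index, _toy_dict.eos_index, _toy_dict.unk_index) == (0, 1, 2, 3)
assert _toy_dict.add_symbol("hello") == 4 and _toy_dict[4] == "hello"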
def rewrite_dict_keys(d ) -> dict:
    # (1) remove the "@@" word-breaking suffix, (2) add a "</w>" word-ending symbol where the word is not broken up
    da = dict((re.sub(r'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
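
# Toy illustration of rewrite_dict_keys above: "@@"-suffixed BPE pieces lose the
# marker, word-final pieces gain "</w>", and the four special tokens are kept
# unchanged (the ids here are arbitrary).
assert rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}) == {
    "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel": 4, "lo</w>": 5
}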
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path , pytorch_dump_folder_path ) -> None:
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , 'checkpoint.pt' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='cpu' )
    args = chkpt['cfg']['model']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , 'dict.txt' )
    if not os.path.isfile(dict_file ):
        raise ValueError(F"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['vocab_file'] )
    print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , 'bpecodes' )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['merges_file'] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , 'config.json' )
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.0_2,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-1_2,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }
    # good hparam defaults to start with
    print(F"""Generating {biogpt_model_config_file}""" )
    with open(biogpt_model_config_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(F"""Generating {biogpt_tokenizer_config_file}""" )
    with open(biogpt_tokenizer_config_file , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt['model']
    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight' ):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace('decoder' , 'biogpt' )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(F"""Generating {pytorch_weights_dump_path}""" )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print('Conversion is done!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
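
# Illustrative invocation (both paths are placeholders, not real locations):
#   python <this_script>.py \
#       --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
# The checkpoint directory is expected to contain checkpoint.pt, dict.txt and bpecodes.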
| 143 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key( k : str ,patterns : list ):
    '''simple docstring'''
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
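
# Toy check of rename_state_dict_key above: the patterns are applied in order as
# plain substring replacements (the pattern list here mirrors the first entries
# of INIT_COMMON and is only illustrative).
assert rename_state_dict_key("layer_0/kernel", [("/", "."), ("layer_", "layers."), ("kernel", "weight")]) == "layers.0.weight"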
def convert_bigbird_pegasus( tf_weights : dict ,config_update : dict ):
    '''simple docstring'''
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
    for k, v in tqdm(decoder_weights.items() ,'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    for k, v in tqdm(remaining_weights.items() ,'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing , extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
    assert extra == [], f'''no matches found for the following tf keys {extra}'''
    return torch_model
def get_tf_weights_as_numpy( path ):
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars ,desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch( ckpt_path : str ,save_dir : str ,config_update : dict ):
    '''simple docstring'''
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 140 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCamelCase_ ( unittest.TestCase ):
    def test_flatten_dict( self ):
        '''simple docstring'''
        input_dict = {
            '''task_specific_params''': {
                '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
                '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
                '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
            }
        }
        expected_dict = {
            '''task_specific_params.summarization.length_penalty''': 1.0,
            '''task_specific_params.summarization.max_length''': 128,
            '''task_specific_params.summarization.min_length''': 12,
            '''task_specific_params.summarization.num_beams''': 4,
            '''task_specific_params.summarization_cnn.length_penalty''': 2.0,
            '''task_specific_params.summarization_cnn.max_length''': 142,
            '''task_specific_params.summarization_cnn.min_length''': 56,
            '''task_specific_params.summarization_cnn.num_beams''': 4,
            '''task_specific_params.summarization_xsum.length_penalty''': 1.0,
            '''task_specific_params.summarization_xsum.max_length''': 62,
            '''task_specific_params.summarization_xsum.min_length''': 11,
            '''task_specific_params.summarization_xsum.num_beams''': 6,
        }
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.reshape(x , (4, 3) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.reshape(x , (12, 5) ) ) )
    @require_torch
    def test_reshape_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_tf
    def test_reshape_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , reshape(t , (4, 3) ).numpy() ) )
        x = np.random.randn(3 , 4 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , reshape(t , (12, 5) ).numpy() ) )
    @require_flax
    def test_reshape_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (4, 3) ) , np.asarray(reshape(t , (4, 3) ) ) ) )
        x = np.random.randn(3 , 4 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x , (12, 5) ) , np.asarray(reshape(t , (12, 5) ) ) ) )
    def test_squeeze_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        self.assertTrue(np.allclose(squeeze(x ) , np.squeeze(x ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.squeeze(x , axis=2 ) ) )
    @require_torch
    def test_squeeze_torch( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_tf
    def test_squeeze_tf( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ) , squeeze(t ).numpy() ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , squeeze(t , axis=2 ).numpy() ) )
    @require_flax
    def test_squeeze_flax( self ):
        '''simple docstring'''
        x = np.random.randn(1 , 3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ) , np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1 , 4 , 1 , 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x , axis=2 ) , np.asarray(squeeze(t , axis=2 ) ) ) )
    def test_expand_dims_numpy( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.expand_dims(x , axis=1 ) ) )
    @require_torch
    def test_expand_dims_torch( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_tf
    def test_expand_dims_tf( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , expand_dims(t , axis=1 ).numpy() ) )
    @require_flax
    def test_expand_dims_flax( self ):
        '''simple docstring'''
        x = np.random.randn(3 , 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x , axis=1 ) , np.asarray(expand_dims(t , axis=1 ) ) ) )
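
# Illustrative reminder of the flatten_dict behaviour exercised above: nested
# keys are joined with "." into a single flat mapping.
assert flatten_dict({"a": {"b": 1}}) == {"a.b": 1}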
| 299 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text( text: str ) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
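
# A worked toy example of the unigram entropy printed by calculate_prob: for
# "abca", p(a)=0.5 and p(b)=p(c)=0.25, so H = -(0.5*log2 0.5 + 2*0.25*log2 0.25) = 1.5 bits.
def _toy_unigram_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())


assert abs(_toy_unigram_entropy("abca") - 1.5) < 1e-9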
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 299 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _UpperCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_LayoutLMv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        image = Image.open(ds[0]["file"] ).convert("RGB" )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 169 |
def lowerCAmelCase ( txt : str ):
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
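
# Example: each alphabetic position yields one variant with that character
# uppercased, while non-alphabetic positions are skipped.
assert lowerCAmelCase("ab1") == ["Ab1", "aB1"]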
if __name__ == "__main__":
__import__("doctest").testmod()
| 169 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
'''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__magic_name__ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__magic_name__ , return_tensors="""pt""" ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = self.image_processing_class(**self.image_processor_dict )
a = self.image_processing_class(do_resize=__magic_name__ , do_normalize=__magic_name__ , do_rescale=__magic_name__ )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
a = image_processing_a.pad(__magic_name__ , return_tensors="""pt""" )
a = image_processing_a(__magic_name__ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
a = json.loads(f.read() )
a = {"""image_id""": 3_9769, """annotations""": target}
# encode them
a = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
a = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1E-4 ) )
# verify area
a = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1E-3 ) )
# verify image_id
a = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify orig_size
a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
@slow
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
a = json.loads(f.read() )
a = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
a = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
a = YolosImageProcessor(format="""coco_panoptic""" )
a = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors="""pt""" )
# verify pixel values
a = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __magic_name__ )
a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __magic_name__ , atol=1E-4 ) )
# verify area
a = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __magic_name__ ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __magic_name__ )
a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __magic_name__ , atol=1E-3 ) )
# verify image_id
a = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __magic_name__ ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __magic_name__ ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __magic_name__ ) )
# verify masks
a = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __magic_name__ )
# verify orig_size
a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __magic_name__ ) )
# verify size
a = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __magic_name__ ) )
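Outside the test harness, the processor is normally paired with the detection model. A minimal sketch of that flow, assuming the `hustvl/yolos-small` checkpoint already used above and a placeholder image path:

import torch
from PIL import Image

from transformers import YolosForObjectDetection, YolosImageProcessor

image = Image.open("cats.jpg")  # any RGB image; the path is illustrative
image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# rescale boxes back to the original image size and keep confident detections
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())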
| 371 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
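For reference, the classes this test exercises can also be driven directly. A minimal forward-pass sketch, assuming network access to the `roberta-base` checkpoint:

from transformers import FlaxRobertaModel, RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")

inputs = tokenizer("Hello world", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768) for roberta-base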
| 347 | 0 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
SCREAMING_SNAKE_CASE_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 256
def lowercase (_lowerCAmelCase ):
if len(_lowerCAmelCase ) < MIN_NUM_TOKENS:
return None
__lowerCAmelCase = MinHash(num_perm=_lowerCAmelCase )
for token in set(_lowerCAmelCase ):
min_hash.update(token.encode() )
return min_hash
def lowercase (_lowerCAmelCase ):
return {t for t in NON_ALPHA.split(_lowerCAmelCase ) if len(t.strip() ) > 0}
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , *,
snake_case_ = 0.85 , ) -> Optional[int]:
__lowerCAmelCase = duplication_jaccard_threshold
__lowerCAmelCase = NUM_PERM
__lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__lowerCAmelCase = defaultdict(snake_case_ )
def A__ ( self , snake_case_ , snake_case_ ) -> None:
__lowerCAmelCase = self._index.query(snake_case_ )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(snake_case_ , snake_case_ )
if len(snake_case_ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(snake_case_ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(snake_case_ )
def A__ ( self ) -> List[List[Dict]]:
__lowerCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
__lowerCAmelCase = [base] + list(snake_case_ )
# reformat the cluster to be a list of dict
__lowerCAmelCase = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(snake_case_ )
return duplicate_clusters
def A__ ( self , snake_case_ ) -> None:
__lowerCAmelCase = self.get_duplicate_clusters()
with open(snake_case_ , """w""" ) as f:
json.dump(snake_case_ , snake_case_ )
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase , __lowerCAmelCase = element
__lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowercase (_lowerCAmelCase ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_lowerCAmelCase , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_lowerCAmelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCAmelCase ) ) , max_queue_size=100 ) ):
di.add(_lowerCAmelCase , _lowerCAmelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_tokens(_lowerCAmelCase )
__lowerCAmelCase = get_tokens(_lowerCAmelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
SCREAMING_SNAKE_CASE_ = None
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
for elementa in cluster:
__lowerCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
__lowerCAmelCase = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCAmelCase , _lowerCAmelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__lowerCAmelCase = 1
extremes.append(_lowerCAmelCase )
return extremes
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
global _shared_dataset
__lowerCAmelCase = dataset
__lowerCAmelCase = []
__lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_lowerCAmelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCAmelCase , _lowerCAmelCase , ) , total=len(_lowerCAmelCase ) , ):
extremes_list.append(_lowerCAmelCase )
return extremes_list
def lowercase (_lowerCAmelCase , _lowerCAmelCase = 0.85 ):
__lowerCAmelCase = make_duplicate_clusters(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
__lowerCAmelCase = {}
__lowerCAmelCase = find_extremes(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for extremes in extremes_clusters:
for element in extremes:
__lowerCAmelCase = element
__lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() )
__lowerCAmelCase = dataset.filter(lambda _lowerCAmelCase , _lowerCAmelCase : idx not in remove_indices , with_indices=_lowerCAmelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__lowerCAmelCase = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
__lowerCAmelCase = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"""Original dataset size: {len(_lowerCAmelCase )}""" )
print(f"""Number of duplicate clusters: {len(_lowerCAmelCase )}""" )
print(f"""Files in duplicate cluster: {len(_lowerCAmelCase )}""" )
print(f"""Unique files in duplicate cluster: {len(_lowerCAmelCase )}""" )
print(f"""Filtered dataset size: {len(_lowerCAmelCase )}""" )
return ds_filter, duplicate_clusters
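A minimal end-to-end sketch of the `deduplicate_dataset` entry point on a toy `datasets.Dataset`; the column values are illustrative, but the `content`, `repo_name` and `path` columns are the ones the script expects:

from datasets import Dataset

# toy corpus: rows 0 and 1 share the same token set, row 2 is unrelated
base = "def add(a, b):\n    return a + b\n"
ds = Dataset.from_dict(
    {
        "content": [base * 5, base * 6, "print('totally different content here')\n" * 5],
        "repo_name": ["r1", "r2", "r3"],
        "path": ["a.py", "b.py", "c.py"],
    }
)

if __name__ == "__main__":  # the mp.Pool calls above need an importable __main__
    ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(len(ds_dedup), clusters)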
| 301 |
"""simple docstring"""
def lowercase (_lowerCAmelCase = 100_0000 ):
__lowerCAmelCase = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _lowerCAmelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
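A quick sanity check against a brute-force totient is useful when touching the sieve; a throwaway snippet, not part of the solution:

from math import gcd


def phi_naive(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21
assert solution(8) == sum(phi_naive(k) for k in range(2, 9)) == 21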
| 301 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
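A minimal usage sketch, assuming the public `Salesforce/instructblip-flan-t5-xl` checkpoint (any InstructBLIP checkpoint with a saved qformer_tokenizer subfolder should behave the same) and a placeholder image path:

from PIL import Image

from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open("photo.jpg")  # illustrative path

inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# the QFormer token stream is returned alongside the regular tokenizer output
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']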
| 362 |
import requests
from bs4 import BeautifulSoup


def world_covid_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline COVID-19 counters from worldometers and return them as a dict."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid_stats().items():
        print(f"{key}\n{value}\n")
| 5 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exception raised during checksum verification of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exception raised during splits verification."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
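A small usage sketch of the checksum path; the relative imports suggest this is `datasets.utils.info_utils`, so the import below assumes the module is consumed from an installed `datasets` package:

from datasets.utils.info_utils import NonMatchingChecksumError, verify_checksums

expected = {"https://example.com/train.csv": {"num_bytes": 10, "checksum": "abc"}}
recorded = {"https://example.com/train.csv": {"num_bytes": 10, "checksum": "def"}}

try:
    verify_checksums(expected, recorded)
except NonMatchingChecksumError as err:
    print("mismatch detected:", err)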
| 52 |
import pickle

import numpy as np
from matplotlib import pyplot as plt


class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: unit number of the flattened layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read a saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_weight, rate_thre)
        # modify model parameters
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slices of the original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, saved as a list of matrices
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expand each data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand a list of matrices to one flat list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a matrix to one dimension
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # propagate the pooled gradients back onto the feature maps
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layers
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the error over all single images
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Completed---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after convolution and pooling, so we can inspect it
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
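A toy training run wiring the shapes together (all sizes are illustrative): with 6x6 inputs, a 3x3 kernel at step 1 yields 4x4 feature maps, 2x2 average pooling reduces them to 2x2, and two kernels flatten to 8 inputs for the BP layers.

import numpy as np

np.random.seed(0)

# conv1_get = [kernel_size, num_kernels, step]
cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=4, bp_num3=2)

datas_train = [np.random.rand(6, 6) for _ in range(4)]
datas_teach = [np.random.rand(2) for _ in range(4)]

cnn.train(
    patterns=4,
    datas_train=datas_train,
    datas_teach=datas_teach,
    n_repeat=5,
    error_accuracy=0.1,
    draw_e=False,
)
print(cnn.predict(datas_train[:1]))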
| 52 | 1 |
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                         ^ unk: 2 + 1 = 3                   unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 187 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for `TimmBackbone`, a thin wrapper that exposes `timm` models as backbones."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
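A minimal sketch of how this config is consumed, assuming `timm` is installed; the `resnet18` backbone name and input shape are illustrative:

import torch

from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)

features = backbone(torch.randn(1, 3, 224, 224)).feature_maps
print([f.shape for f in features])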
| 187 | 1 |