| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
"""Convert Conditional DETR checkpoints to the Hugging Face Transformers format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    # move the value stored under the old key to the new key
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
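# ---------------------------------------------------------------------------
# Illustrative helper, not part of the original conversion script. It shows the
# same q/k/v split performed above: PyTorch's nn.MultiheadAttention stores the
# three projections fused as one (3*d, d) weight and one (3*d,) bias, stacked
# in query/key/value order (d = 256 for Conditional DETR). The function is
# never called here; it exists purely to document the slicing.
def _demo_split_in_proj(in_proj_weight, in_proj_bias, d=256):
    q = (in_proj_weight[:d, :], in_proj_bias[:d])
    k = (in_proj_weight[d : 2 * d, :], in_proj_bias[d : 2 * d])
    v = (in_proj_weight[-d:, :], in_proj_bias[-d:])
    return q, k, v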
def prepare_img():
    # COCO validation image of two cats, used across the DETR-family conversion scripts as a sanity check
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
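# ---------------------------------------------------------------------------
# Usage sketch (illustrative; the script file name and the output folder name
# are assumptions, not part of the original source):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50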
# --- code_codestyle: 254 ---
"""Evaluate a polynomial at a point, both directly and with Horner's method."""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # direct evaluation: sum of c_i * x**i over all coefficients
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    # Horner's scheme: one multiply-add per coefficient, highest power first
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
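# Worked check (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0
# both functions return 79800.0. Direct evaluation computes
# 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000, while Horner folds the
# coefficients highest-first: (((7.0*10 + 9.3)*10 + 5.0)*10 + 0.0)*10 + 0.0,
# using one multiplication and one addition per coefficient.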
# --- style_context_codestyle: 254 | label: 1 ---
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of the authenticated user via the REST API.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
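# Usage sketch (illustrative; requires a GitHub personal access token, and the
# script file name below is an assumption):
#
#   export USER_TOKEN=<token from https://github.com/settings/tokens>
#   python fetch_github_info.py
#
# The endpoint returns the authenticated user's profile as JSON.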
# --- code_codestyle: 352 ---
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !" (French: "I love camembert!")
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
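# Note: tests decorated with @slow are skipped by default in the transformers
# test suite and only run when the RUN_SLOW environment variable is set, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/camembert -v
# (the exact test path is an assumption).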
# --- style_context_codestyle: 306 | label: 0 ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
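# The _LazyModule pattern above defers the heavy torch-backed import until an
# attribute is actually accessed. Illustrative sketch of the effect (not part
# of this file):
#
#   from transformers.models.ibert import IBertConfig   # cheap: config only
#   from transformers.models.ibert import IBertModel    # now triggers the
#                                                        # modeling_ibert import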
# --- code_codestyle: 62 ---
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
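# Usage sketch (illustrative): with this conftest on the path, the shared
# option registered above can be passed to pytest, e.g.
#   python -m pytest tests -s --make-reports=my_run
# which makes pytest_terminal_summary write per-run report files.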
# --- style_context_codestyle: 298 | label: 0 ---
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    # promote the left child; used to fix a left-heavy imbalance
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # mirror image of right_rotation: promote the right child
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    # left-right case: rotate the left child left, then the node right
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    # right-left case: rotate the right child right, then the node left
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            # two children: replace with the in-order successor, then delete it
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    # rebalance if the deletion skewed the subtree heights
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    """
    A self-balancing binary search tree (AVL tree).
    """

    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for _ in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
        print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
        print(str(t))
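# Rotation sketch (illustrative): inserting 3, 2, 1 in that order creates a
# left-left imbalance at the root, which a single right_rotation repairs:
#
#        3                2
#       /                / \
#      2      -->       1   3
#     /
#    1
#
# The mirror case uses left_rotation; zig-zag shapes use lr_/rl_rotation.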
# --- code_codestyle: 366 ---
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
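# Usage sketch (illustrative): resolving a loader module from a file extension.
#
#   module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
#   # -> ("csv", {"sep": "\t"}): .tsv files reuse the csv module with a tab separator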
# --- style_context_codestyle: 256 | label: 0 ---
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")
    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
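# Usage sketch (illustrative): running a single test from this suite, e.g.
#   python -m pytest tests/models/auto/test_tokenization_auto.py -k "from_type" -v
# (the repository-relative path is an assumption).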
# --- code_codestyle: 13 ---
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """
    Converts a 3d point to a 2d drawable point using a simple perspective projection.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """
    Rotate a 3d point around the x, y or z axis by the given angle.
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
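# Worked example (illustrative) for the projection above: with
# x=1.0, y=2.0, z=3.0, scale=10.0, distance=10.0,
#   projected_x = (1.0 * 10.0) / (3.0 + 10.0) * 10.0 ≈ 7.6923
#   projected_y = (2.0 * 10.0) / (3.0 + 10.0) * 10.0 ≈ 15.3846
# i.e. points farther from the camera (larger z) shrink toward the origin.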
# --- style_context_codestyle: 124 | label: 0 ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# --- code_codestyle: 252 ---
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) actually took a training step.
    Useful to avoid stepping the scheduler too fast when gradients overflowed and no training step was performed
    (in mixed precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
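# Usage sketch (illustrative; in practice `Accelerator.prepare` builds this
# wrapper for you, and the names below are assumptions for the example only):
#
#   from torch.optim import SGD
#   from torch.optim.lr_scheduler import StepLR
#
#   optimizer = SGD(model.parameters(), lr=0.1)
#   scheduler = AcceleratedScheduler(StepLR(optimizer, step_size=10), optimizer)
#   scheduler.step()  # advances only on iterations where the optimizer stepped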
# --- style_context_codestyle: 252 | label: 1 ---
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
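
    # A small usage sketch (hedged: the keyword set and haystack below are
    # illustrative, not part of the original module).
    automaton = Automaton(["what", "hat", "ver", "er"])
    print(automaton.search_in("whatever, err ... , wherever"))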
| 40 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
    '''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
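
# Behaviour sketch (hedged: follows the standard transformers lazy-module setup
# used above): attribute access triggers the real import on first use, so the
# heavy torch-backed modules are only loaded when actually needed, e.g.:
#
#   from transformers.models.cpmant import CpmAntConfig   # resolved lazily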
| 89 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 283 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, carrying the (pre-quantization) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group", ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
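
# Minimal smoke test (hedged: the tensor size is arbitrary; a 3-channel 32x32
# input matches the default config above).
if __name__ == "__main__":
    model = VQModel()
    sample = torch.randn(1, 3, 32, 32)
    out = model(sample)
    print(out.sample.shape)  # expected: torch.Size([1, 3, 32, 32])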
| 283 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False  # flag name reconstructed; the obfuscated source only kept an unnamed False attribute

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    # test method names reconstructed from the checkpoints each test exercises

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
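
# Hedged usage sketch of the pipeline these tests exercise (checkpoint name taken
# from the integration test above; `sag_scale` enables self-attention guidance):
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]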
| 17 |
def binary_count_setbits(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a non-negative integer."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
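
    # Quick sanity checks (hedged: illustrative values, not from the original file).
    assert binary_count_setbits(25) == 3  # 25 == 0b11001
    assert binary_count_setbits(0) == 0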
| 306 | 0 |
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
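
# Deterministic demo (hedged: the example list is illustrative; sorting is in-place):
#
#   data = [9, 2, 7, 4, 1]
#   quick_sort_random(data, 0, len(data))
#   data  -> [1, 2, 4, 7, 9]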
| 131 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
    '''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_luke'''] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 131 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of a*x^2 + b*x + c = 0."""
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.')
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(F"The solutions are: {solution1} and {solution2}")
if __name__ == "__main__":
main()
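
    # Worked check (hedged: extra illustrative call): x^2 + 1 = 0 has purely
    # imaginary roots, so the function keeps them complex.
    print(quadratic_roots(a=1, b=0, c=1))  # -> (1j, -1j)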
| 29 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
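
    # Physical sanity check (hedged: round-number properties of water).
    # density ~998 kg/m^3, bulk modulus ~2.15e9 Pa -> ~1468 m/s, close to the
    # accepted ~1480 m/s speed of sound in water.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))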
| 256 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
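
    # Example (hedged: classic 3-node case): the root holds 3 coins, both
    # children hold 0, so two moves push one coin down to each child.
    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(tree) == 2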
| 266 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("""glue""", """mrpc""")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["""labels"""]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
        performance_metric[F'''epoch-{epoch}'''] = eval_metric["""accuracy"""]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["""accuracy"""]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, """all_results.json"""), """w""") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""", type=str, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=False, )
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
    parser.add_argument(
        """--performance_lower_bound""", type=float, default=None, help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""", )
    parser.add_argument(
        """--num_epochs""", type=int, default=3, help="""Number of train epochs.""", )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
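
# Launch sketch (hedged: the config file name and flag values are illustrative):
#
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 \
#       --performance_lower_bound 0.8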
| 266 | 1 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f'\u001b[{color}m{content}\u001b[0m', end)


def reset_cursor():
    forceWrite("""\r""")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')


def clear_line():
    forceWrite(""" """ * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH)
| 252 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("""inf""")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("""inf""")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 252 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 133 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = 13 , snake_case__ = 64 , snake_case__ = 2 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = True , snake_case__ = True , snake_case__ = 128 , snake_case__=[16, 32, 64, 128] , snake_case__ = 7 , snake_case__ = 4 , snake_case__ = 37 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 10 , snake_case__ = 0.02 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 128 , snake_case__ = [2, 2, 2, 2] , snake_case__ = 2 , snake_case__ = 2 , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : int = image_size
lowerCAmelCase : int = patch_size
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : int = is_training
lowerCAmelCase : Tuple = use_labels
lowerCAmelCase : List[Any] = hidden_size
lowerCAmelCase : Dict = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : str = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[int] = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : List[str] = encoder_stride
lowerCAmelCase : Union[str, Any] = num_attention_outputs
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Tuple = embed_dim + 1
lowerCAmelCase : str = resolution
lowerCAmelCase : Optional[Any] = depths
lowerCAmelCase : Any = hidden_sizes
lowerCAmelCase : List[str] = dim
lowerCAmelCase : str = mlp_expansion_ratio
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFEfficientFormerModel(config=snake_case__ )
lowerCAmelCase : Optional[int] = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.type_sequence_label_size
lowerCAmelCase : Dict = TFEfficientFormerForImageClassification(snake_case__ )
lowerCAmelCase : Tuple = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase : str = 1
lowerCAmelCase : Any = TFEfficientFormerForImageClassification(snake_case__ )
lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : str = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = config_and_inputs
lowerCAmelCase : List[str] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # flag names below follow the usual TF test-mixin attributes; the obfuscated
    # source only preserved five unnamed False flags
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = model_class(snake_case__ )
lowerCAmelCase : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : List[Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
lowerCAmelCase : Union[str, Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
lowerCAmelCase : Tuple = seq_length * self.model_tester.chunk_length
else:
lowerCAmelCase : List[str] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
                decoder_hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(decoder_hidden_states, (list, tuple))
self.assertEqual(len(snake_case__ ) , snake_case__ )
lowerCAmelCase : int = getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCAmelCase : Tuple = getattr(self.model_tester , "decoder_seq_length" , snake_case__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : str = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Optional[Any] = TFEfficientFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , "seq_length" , snake_case__ )
lowerCAmelCase : Dict = getattr(self.model_tester , "encoder_seq_length" , snake_case__ )
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , "key_length" , snake_case__ )
lowerCAmelCase : List[str] = getattr(self.model_tester , "chunk_length" , snake_case__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
lowerCAmelCase : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCAmelCase : int = True
lowerCAmelCase : int = False
lowerCAmelCase : Dict = True
lowerCAmelCase : List[Any] = model_class(snake_case__ )
lowerCAmelCase : Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : int = True
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : int = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
lowerCAmelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCAmelCase : List[str] = model_class(snake_case__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCAmelCase : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=snake_case__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCAmelCase : Optional[Any] = model(snake_case__ )
self.assertTrue(outputs_dict is not None )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
lowerCAmelCase : Tuple = self.default_image_processor
lowerCAmelCase : Tuple = prepare_img()
lowerCAmelCase : List[str] = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : Optional[int] = model(**snake_case__ , training=snake_case__ )
# verify the logits
lowerCAmelCase : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Tuple = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Tuple = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : Dict = model(**snake_case__ , training=snake_case__ )
# verify the logits
lowerCAmelCase : Optional[int] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : Any = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
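
# Hedged inference sketch for the checkpoint exercised above
# ("snap-research/efficientformer-l1-300"; `image` is assumed to be a PIL image):
#
#   processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
#   model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
#   logits = model(**processor(images=image, return_tensors="tf")).logits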
| 133 | 1 |
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1

    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
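
    # Known result for the Project Euler #2 limit of four million:
    assert solution() == 4613732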
| 283 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: List[Any] = batch_size
lowercase__: Optional[int] = image_size
lowercase__: Dict = num_channels
lowercase__: Union[str, Any] = embeddings_size
lowercase__: List[str] = hidden_sizes
lowercase__: int = depths
lowercase__: Union[str, Any] = is_training
lowercase__: int = use_labels
lowercase__: List[Any] = hidden_act
lowercase__: Dict = num_labels
lowercase__: Any = scope
lowercase__: Dict = len(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: Dict = None
if self.use_labels:
lowercase__: Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase__: str = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = TFResNetModel(config=_UpperCAmelCase )
lowercase__: Tuple = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = self.num_labels
lowercase__: int = TFResNetForImageClassification(_UpperCAmelCase )
lowercase__: Tuple = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__: str = config_and_inputs
lowercase__: Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 2 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs,):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
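# Hypothetical usage sketch, not part of the original module: build a small
# config and read back the ONNX export metadata defined above. The call
# BeitOnnxConfig(config) assumes the generic OnnxConfig(config) constructor.
if __name__ == "__main__":
    config = BeitConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    onnx_config = BeitOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict with the dynamic pixel_values axes
    print(onnx_config.atol_for_validation)  # 1e-4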
| 2 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    """Map a single original Jukebox checkpoint key to its transformers name."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename all keys in `state_dict`, recording the mapping and skipping mismatched shapes."""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox weights, remap their keys and save a transformers checkpoint."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
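# A small sanity-check sketch (not part of the original script) exercising
# replace_key on two representative checkpoint keys; safe to run offline.
# The helper name _demo_replace_key is hypothetical.
def _demo_replace_key():
    assert replace_key("vqvae.bottleneck.level_blocks.0.k") == "vqvae.bottleneck.level_blocks.0.codebook"
    assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"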
| 131 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
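# Hedged illustration, not part of the original module: surfacing the
# FutureWarning emitted by the deprecated alias above. The helper name is
# hypothetical.
def _demo_deprecation_warning():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        PoolFormerFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)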
| 131 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Base model output extended with a projection_state field."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs,):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.base_model = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None,):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output_pre = outputs["hidden_states"][-2]
            sequence_output_pre = self.pre_LN(sequence_output_pre)
            projection_state = self.transformation_pre(sequence_output_pre)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
        )
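# Hypothetical smoke test, not part of the original module; all sizes are
# illustrative. It checks that the projection head maps hidden_size down to
# project_dim.
def _demo_roberta_series():
    config = RobertaSeriesConfig(vocab_size=100, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128, project_dim=32)
    model = RobertaSeriesModelWithTransformation(config)
    input_ids = torch.randint(0, 100, (1, 8))
    output = model(input_ids=input_ids)
    assert output.projection_state.shape == (1, 8, 32)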
| 370 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim,):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
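# Hypothetical shape check, not part of the original module; all sizes are
# illustrative. With 4 extra context tokens and cross_attention_dim=8, a
# 5-token text sequence becomes 9 tokens of width 8.
def _demo_text_proj():
    proj = UnCLIPTextProjModel(clip_extra_context_tokens=4, clip_embeddings_dim=16, time_embed_dim=32, cross_attention_dim=8)
    image_embeddings = torch.randn(2, 16)
    prompt_embeds = torch.randn(2, 16)
    text_encoder_hidden_states = torch.randn(2, 5, 16)
    hidden_states, time_embeddings = proj(
        image_embeddings=image_embeddings,
        prompt_embeds=prompt_embeds,
        text_encoder_hidden_states=text_encoder_hidden_states,
        do_classifier_free_guidance=False,
    )
    assert hidden_states.shape == (2, 9, 8) and time_embeddings.shape == (2, 32)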
| 335 | 0 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
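# Minimal standalone sketch (not part of the original tests): running one
# benchmark outside unittest; it assumes the same PyTorchBenchmark API used
# above, and the helper name is hypothetical.
def _demo_benchmark():
    benchmark_args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    )
    results = PyTorchBenchmark(benchmark_args).run()
    print(results.time_inference_result)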
| 266 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 77 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs,) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs,) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs,) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs,) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs,) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
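# Hypothetical usage sketch, not part of the original module: run the full
# preprocess pipeline on one random channels-first image and check the output
# batch shape (resize to a ~256 shortest edge, then center crop to 224x224).
def _demo_preprocess():
    image = (np.random.rand(3, 256, 256) * 255).astype("uint8")
    processor = LevitImageProcessor()
    batch = processor(images=image, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 224, 224)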
| 77 | 1 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3.32e24; probabilistic above that."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
    """Exercise miller_rabin() just below and above each deterministic bound."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
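# A minimal helper sketch, not part of the original script, isolating the first
# step of every Miller-Rabin round above: writing n - 1 as d * 2**s with d odd.
# The helper name _decompose is hypothetical.
def _decompose(n: int) -> tuple:
    """Return (d, s) such that n - 1 == d * 2**s and d is odd."""
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s


# e.g. _decompose(561) == (35, 4), since 560 == 35 * 2**4.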
| 133 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
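# Cross-check sketch, not part of the original solution: the same digit sum via
# string conversion; solution_str is a hypothetical helper name.
def solution_str(power: int = 1000) -> int:
    """Sum the decimal digits of 2**power using str()."""
    return sum(int(digit) for digit in str(2**power))


# solution_str(15) == 26, because 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.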
| 133 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs,):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
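# Hypothetical usage sketch, not part of the original module: the derived
# hidden_size equals embed_dim * 2 ** (len(depths) - 1).
if __name__ == "__main__":
    config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == 96 * 2**3  # 768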
| 40 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64,):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : str ):
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Optional[int] ):
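        # Training-mode forward pass with retain_grad() on each intermediate output,
        # so the assertions below can verify that gradients flow through the encoder,
        # pixel decoder, transformer decoder and attention maps.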
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase__ = 1e-4
def prepare_img() -> Tuple:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
    @cached_property
    def model_checkpoints ( self : Optional[Any] ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor ( self : int ):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase ( self : Any ):
_snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
_snake_case = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : str ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : Optional[int] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )
_snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
| 40 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs (self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model (self , config , pixel_values , labels ):
        '''simple docstring'''
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification (self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common (self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp (self ):
        '''simple docstring'''
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties (self ):
'''simple docstring'''
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
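    # The hidden-states test below checks that one feature map per ResNet stage plus
    # the stem output is returned, for both the 'basic' and 'bottleneck' block variants.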
    def UpperCamelCase__ (self : Optional[int] ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , config ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFResNetModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def prepare_img() -> Dict:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor (self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase , return_tensors='''tf''' )
# forward pass
lowercase__ = model(**UpperCamelCase )
# verify the logits
lowercase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowercase__ = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , UpperCamelCase , atol=1E-4 ) )
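        # The expected logits slice above presumably comes from the reference PyTorch
        # checkpoint; the 1000-way output corresponds to the ImageNet-1k label set.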
| 2 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
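# The hands above are ordered weakest to strongest, so comparing two indices into
# this tuple doubles as a ground-truth Loss/Tie/Win oracle for the random tests below.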
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands = 100 ):
    """Yield `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands ))


@pytest.mark.parametrize('''hand, expected''' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    assert PokerHand(hand )._is_flush() == expected


@pytest.mark.parametrize('''hand, expected''' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    assert PokerHand(hand )._is_straight() == expected


@pytest.mark.parametrize('''hand, expected, card_values''' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize('''hand, expected''' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    assert PokerHand(hand )._is_same_kind() == expected


@pytest.mark.parametrize('''hand, expected''' , TEST_TYPES )
def test_hand_values(hand , expected ):
    assert PokerHand(hand )._hand_type == expected


@pytest.mark.parametrize('''hand, other, expected''' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected


@pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand('''2C 4S AS 3D 5C''' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Mirrors Project Euler problem 54: player one wins 376 of the 1000 dealt hands.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , '''poker_hands.txt''' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 2 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self , vocab_size=6_50_24 , hidden_size=45_44 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
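# Minimal usage sketch (hedged: assumes this config class is paired with the matching
# Falcon model classes from `transformers`; the tiny sizes are illustrative only):
# config = FalconConfig(num_hidden_layers=2, hidden_size=256, num_attention_heads=4)
# assert config.head_dim == 64
# assert config.rotary  # rotary embeddings are used because `alibi` defaults to False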
| 357 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool( v : Any ) -> Optional[Any]:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def make_choice_type_function( choices : list ) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs , ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser ( ArgumentParser ):
    dataclass_types : Iterable[DataClassType]
    def __init__(self , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs) -> None:
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser : ArgumentParser , field : dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default')
        aliases = kwargs.pop('aliases' , [])
        if isinstance(aliases , str):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type)
        if origin_type is Union or (hasattr(types , 'UnionType') and isinstance(origin_type , types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type) and issubclass(field.type , Enum)):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'])
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type) and issubclass(origin_type , list):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(f"--no_{field.name}" , action='store_false' , dest=field.name , **bool_kwargs)
    def _add_dataclass_arguments(self , dtype : DataClassType):
        if hasattr(dtype , '_argument_group_name'):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = '.'.join(map(str , sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.') from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field)
    def parse_args_into_dataclasses(self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix('.args'))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append')
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-') , None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace , k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self , args : Dict[str, Any] , allow_extra_keys : bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self , json_file : str , allow_extra_keys : bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file) , encoding='utf-8') as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self , yaml_file : str , allow_extra_keys : bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()) , allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 95 | 0 |
"""simple docstring"""
def logical_left_shift(number: int , shift_amount: int) -> str:
    """Shift left by padding zeros on the right; inputs must be non-negative."""
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int , shift_amount: int) -> str:
    """Shift right by dropping the rightmost bits; inputs must be non-negative."""
    if number < 0 or shift_amount < 0:
        raise ValueError("""both inputs must be positive integers""" )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int , shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number ) ).strip("""-""" )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            """1""" + """0""" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
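# Quick examples (values checked by hand against the logic above):
# logical_left_shift(0b1101, 1)       -> '0b11010'
# logical_right_shift(0b1101, 1)      -> '0b110'
# arithmetic_right_shift(-0b1101, 1)  -> '0b11001'  (sign bit is replicated)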
| 155 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
    def prepare_image_processor_dict ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values ( self , image_inputs , batched=False ):
        """simple docstring"""
        # Expected output size: resize so the shorter side equals size["shortest_edge"]
        # while keeping aspect ratio; batched inputs are padded to the batch maximum.
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp ( self ):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self )
@property
    def image_processor_dict ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
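    # The next three tests run the same resize/rescale/normalize pipeline over PIL
    # images, NumPy arrays and PyTorch tensors, checking the padded output shapes.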
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 0 |
"""simple docstring"""
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num , int):
        raise ValueError('''Input must be an integer''' )
    if input_num <= 0:
        raise ValueError('''Input must be positive''' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
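# Example: sum_of_divisors(28) == 1 + 2 + 4 + 7 + 14 == 28, i.e. 28 is a perfect number.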
| 353 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    # Split each fused qkv tensor into separate query/key/value entries under the
    # HF naming scheme; all other keys are simply renamed via rename_key().
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case__ : Dict = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 314 | 0 |
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path ):
    """Parse `path` (lines of "node_a node_b distance") into an adjacency dict."""
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution(path , dict_of_neighbours ):
    """Greedy nearest-neighbour tour starting from the node named by the file's first character."""
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 1_0000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    """All 2-swap neighbours of `solution`, each with its total distance appended."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    '''simple docstring'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    '''simple docstring'''
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 77 |
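For readers following the tabu-list mechanics in the script above, here is a compact, self-contained sketch of the same idea on a plain distance matrix. The instance, helper names, and parameter values are illustrative assumptions, not part of the original script.

import itertools

def tour_length(tour, dist):
    # Sum consecutive edges, returning to the start.
    return sum(dist[a][b] for a, b in zip(tour, tour[1:] + tour[:1]))

def two_swap_neighbours(tour):
    # All tours obtained by swapping two non-start positions, plus the swapped pair.
    for i, j in itertools.combinations(range(1, len(tour)), 2):
        cand = tour[:]
        cand[i], cand[j] = cand[j], cand[i]
        yield cand, (tour[i], tour[j])

def tabu_search_sketch(start, dist, iters=50, tabu_size=5):
    best, best_cost = start, tour_length(start, dist)
    current, tabu = start, []
    for _ in range(iters):
        # Pick the best non-tabu neighbour (aspiration criteria omitted for brevity).
        moves = [(tour_length(c, dist), c, m) for c, m in two_swap_neighbours(current)
                 if m not in tabu and tuple(reversed(m)) not in tabu]
        if not moves:
            break
        cost, current, move = min(moves)
        tabu.append(move)
        if len(tabu) > tabu_size:
            tabu.pop(0)  # forget the oldest move, as the script's tabu_list does
        if cost < best_cost:
            best, best_cost = current, cost
    return best, best_cost

dist = [[0, 2, 9, 10], [2, 0, 6, 4], [9, 6, 0, 3], [10, 4, 3, 0]]
print(tabu_search_sketch([0, 1, 2, 3], dist))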
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, 'r', encoding='utf-8') as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode('utf-8')
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value('zero_optimization.stage', -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['cpu', 'nvme'])
            offload_devices = set(
                [
                    self.get_value('zero_optimization.offload_optimizer.device'),
                    self.get_value('zero_optimization.offload_param.device'),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, 'overflow')

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 77 | 1 |
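The `get_value`/`find_config_node` pair above is, at its core, a dotted-path lookup into a nested dict. A minimal standalone version of that pattern, with an invented sample config, might look like this:

def get_value(config, ds_key_long, default=None):
    # Walk the dict one dotted segment at a time; the last segment is the leaf key.
    nodes = ds_key_long.split(".")
    ds_key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)

sample = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
print(get_value(sample, "zero_optimization.stage", -1))             # 3
print(get_value(sample, "zero_optimization.offload_param.device"))  # cpu
print(get_value(sample, "zero_optimization.missing.key", "n/a"))    # n/a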
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_trajectory_transformer': [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TrajectoryTransformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 183 |
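The `_LazyModule` pattern above defers heavy imports until an attribute is first touched. A minimal standalone version of the same idea, using PEP 562's module-level `__getattr__` (the package and attribute names below are placeholders, not transformers' actual implementation):

# mypkg/__init__.py — lazy attribute loading (sketch of the idea only).
import importlib

_import_structure = {
    "config": ["MyConfig"],
    "modeling": ["MyModel"],
}
# Invert to: attribute name -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")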
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def test_feed_forward_chunking(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """simple docstring"""
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image, return_tensors="""pt""" ).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self)
| 183 | 1 |
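The expected shapes asserted in the tester above come from simple patch arithmetic: a hierarchical backbone halves the spatial grid at each stage transition, so the token count shrinks by 4x per stage while the embedding dim doubles. A small sanity check of that arithmetic, using the tester's default values:

# Patch-grid arithmetic behind create_and_check_model's shape assertions.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]

tokens_stage0 = (image_size // patch_size) ** 2              # 16x16 grid -> 256 tokens
num_downsamples = len(depths) - 1                            # 2 stage transitions
expected_seq_len = tokens_stage0 // (4 ** num_downsamples)   # 256 // 16 = 16
expected_dim = embed_dim * 2 ** num_downsamples              # 16 * 4 = 64

print(expected_seq_len, expected_dim)  # 16 64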
"""simple docstring"""
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    mid = len(s) // 2
    n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(mid))
def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 40 |
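The `timeit` call in `benchmark_function` above compiles `stmt` once and runs it `number` times in a fresh namespace built from `setup`. A smaller standalone example of the same pattern:

# Minimal timeit usage, mirroring benchmark_function above.
from timeit import timeit

elapsed = timeit(
    stmt="s == s[::-1]",                   # code under test
    setup="s = 'amanaplanacanalpanama'",   # runs once to build the namespace
    number=100_000,                        # repetitions of stmt
)
print(f"{elapsed:.4f} seconds for 100,000 runs")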
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
"""simple docstring"""
    default_checkpoint = """naver-clova-ix/donut-base-finetuned-docvqa"""
    description = (
        """This is a tool that answers a question about a document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    name = """document_qa"""
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40 | 1 |
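The decode step above strips special tokens and removes only the leading task token with a non-greedy regex before the remaining tags are parsed into JSON. A standalone illustration with made-up token strings:

# Post-processing sketch for a Donut-style output string (example tokens invented).
import re

raw = "<s_docvqa><s_question> what is the date? </s_question><s_answer> 11/14/2019 </s_answer></s>"
cleaned = raw.replace("</s>", "")                         # drop the EOS token
cleaned = re.sub(r"<.*?>", "", cleaned, count=1).strip()  # drop only the first tag, <s_docvqa>
print(cleaned)
# '<s_question> what is the date? </s_question><s_answer> 11/14/2019 </s_answer>'
# The surviving tags are what token2json turns into {"question": ..., "answer": ...}.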
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors='''np''')
        input_processor = processor(audio=audio, return_tensors='''np''')
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors='''np''')
        input_processor = processor(images=images, return_tensors='''np''')
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 318 |
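The feature-extractor and image-processor tests above compare two code paths by summing each output array and allowing a small delta rather than demanding exact equality. The same tolerance check, reduced to plain numpy with stand-in arrays:

# Tolerance-based comparison pattern from the tests above (arrays are stand-ins).
import numpy as np

audio = np.ones([12000])
direct = audio * 2.0          # stand-in for feature_extractor(audio) output
via_processor = audio * 2.0   # stand-in for processor(audio=audio) output

# Same check the tests make: summed outputs must agree within delta=1e-2.
assert abs(direct.sum() - via_processor.sum()) <= 1e-2
print("outputs match")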
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        '''simple docstring'''
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 318 | 1 |
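The `model_input_names` property above merges two name lists and deduplicates while preserving order via `dict.fromkeys`. In isolation (the overlap below is invented for the demo):

image_inputs = ["pixel_values", "pixel_mask"]
audio_inputs = ["audio_values", "audio_mask", "pixel_mask"]

# dict keys are unique and insertion-ordered, so this dedupes without reordering.
merged = list(dict.fromkeys(image_inputs + audio_inputs))
print(merged)  # ['pixel_values', 'pixel_mask', 'audio_values', 'audio_mask']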
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'{solution() = }')
| 49 |
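As a quick check of the divisibility chain above, the known substring-divisible pandigital 1406357289 satisfies every prime test; the compact verification below is independent of the snippet's functions:

# Sanity check on the known member 1406357289 (Project Euler 43 family).
num = [int(d) for d in "1406357289"]
primes = [2, 3, 5, 7, 11, 13, 17]
ok = all(
    (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % p == 0
    for i, p in enumerate(primes)
)
print(ok)  # True: 406%2, 063%3, 635%5, 357%7, 572%11, 728%13, 289%17 are all 0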
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        '''simple docstring'''
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        '''simple docstring'''
        pass

    def test_call_pil(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_numpy(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )

    def test_call_pytorch(self):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
| 95 | 0 |
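The `{"shortest_edge": 20}` size plus an 18x18 center crop configured above follow standard preprocessing arithmetic: scale the image so its short side hits the target, then crop from the center. A PIL-free sketch of that computation (function names are illustrative):

def resize_shortest_edge(h, w, shortest_edge):
    # Scale so the shorter side equals shortest_edge, preserving aspect ratio.
    scale = shortest_edge / min(h, w)
    return round(h * scale), round(w * scale)

def center_crop_box(h, w, crop_h, crop_w):
    # Top-left and bottom-right corners of a centered crop window.
    top = (h - crop_h) // 2
    left = (w - crop_w) // 2
    return top, left, top + crop_h, left + crop_w

h, w = resize_shortest_edge(400, 640, 20)   # -> (20, 32)
print(h, w)
print(center_crop_box(h, w, 18, 18))        # -> (1, 7, 19, 25)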
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=5_0400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
@property
    def num_layers(self):
        return self._config.n_layer
@property
    def num_attention_heads(self):
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
@property
    def default_onnx_opset(self):
return 13
| 30 |
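The past_key_values dummies built above are per-layer (key, value) pairs shaped (batch, num_heads, past_len, head_dim), with the attention mask widened to cover past plus current positions. A small torch sketch of the same construction, using toy sizes rather than the real config:

# Dummy past_key_values construction, mirroring generate_dummy_inputs (toy sizes).
import torch

batch, num_heads, n_layer, hidden_size = 2, 16, 4, 4096
seqlen = 7
past_len = seqlen + 2                 # same "+ 2" offset as the ONNX config above
head_dim = hidden_size // num_heads

past_shape = (batch, num_heads, past_len, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]

# The attention mask must then cover past + current positions:
attention_mask = torch.ones(batch, past_len + seqlen, dtype=torch.int64)
print(past_key_values[0][0].shape, attention_mask.shape)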
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """simple docstring"""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 30 | 1 |
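The digit-by-digit loop above avoids converting the big integer to a string; the usual one-line equivalent produces the same result and is handy as a cross-check (1366 is the well-known digit sum of 2**1000):

# Cross-check for solution(): string-based digit sum of 2**1000.
power = 1000
assert sum(int(d) for d in str(2**power)) == 1366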
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'.{module_name}', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    def __init__(self):
        '''simple docstring'''
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('config' , None )
        trust_remote_code = kwargs.pop('trust_remote_code' , None )
        kwargs['_from_auto'] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('image_processor_type' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config , 'image_processor_type' , None )
            if hasattr(config , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('code_revision' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
            f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
            f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
    @staticmethod
    def register(config_class, image_processor_class):
        '''simple docstring'''
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
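# Usage sketch (illustrative, not part of the original file): registering a custom
# image processor so `AutoImageProcessor` can resolve it; `MyConfig` and
# `MyImageProcessor` are hypothetical placeholder classes.
#
#     AutoImageProcessor.register(MyConfig, MyImageProcessor)
#     processor = AutoImageProcessor.from_pretrained("path/to/my-model")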
| 83 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    '''simple docstring'''
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    '''simple docstring'''
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    '''simple docstring'''
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    '''simple docstring'''
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    '''simple docstring'''
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    '''simple docstring'''
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))
        return answers
def get_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    '''simple docstring'''
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
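# Example invocation (illustrative; the model id and file paths are placeholders):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set test.source \
#       --gold_data_path gold_data \
#       --predictions_path e2e_preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5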
| 314 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(F"""{key}\n{value}\n""")
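# Offline sanity check (illustrative): the same key/value pairing applied to a
# hard-coded HTML fragment, so the parsing logic can be exercised without network
# access.
if __name__ == "__main__":
    sample = BeautifulSoup(
        "<h1>Coronavirus Cases:</h1><div class='maincounter-number'>1,234</div>", "html.parser"
    )
    sample_keys = sample.findAll("h1")
    sample_values = sample.findAll("div", {"class": "maincounter-number"})
    print({k.text.strip(): v.text.strip() for k, v in zip(sample_keys, sample_values)})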
| 362 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
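# Quick demo (illustrative): slowsort sorts the list in place.
if __name__ == "__main__":
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]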
| 234 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class a ( unittest.TestCase ):
def UpperCamelCase ( self : List[str] ) -> Any:
lowerCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : List[str] ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
def UpperCamelCase ( self : Optional[Any] ) -> List[str]:
lowerCamelCase_ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
nonlocal batch_sizes
batch_sizes.append(__SCREAMING_SNAKE_CASE )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowerCamelCase_ , lowerCamelCase_ = mock_training_loop_function('hello' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : Tuple ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self : Dict ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__SCREAMING_SNAKE_CASE : int ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self : Any ) -> Dict:
lowerCamelCase_ = torch.cuda.memory_allocated()
lowerCamelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = release_memory(__SCREAMING_SNAKE_CASE )
self.assertEqual(torch.cuda.memory_allocated() , __SCREAMING_SNAKE_CASE )
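# Usage sketch (illustrative): the decorator retries with a halved batch size
# after each simulated out-of-memory error until the call succeeds.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=64)
    def demo(batch_size):
        if batch_size > 16:
            raise_fake_out_of_memory()
        return batch_size

    print(demo())  # 16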
| 183 |
"""simple docstring"""
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
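# Minimal check without an image file (illustrative): the same inversion applied
# to a tiny synthetic 2x2 black image.
if __name__ == "__main__":
    import numpy as np

    tiny = np.zeros((2, 2, 3), dtype=np.uint8)
    print(convert_to_negative(tiny))  # every pixel becomes [255, 255, 255]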
| 183 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
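# Worked examples (illustrative): the Liouville function is +1 when the number of
# prime factors counted with multiplicity is even, and -1 when it is odd.
if __name__ == "__main__":
    print(liouville_lambda(10))  # 10 = 2 * 5, two factors -> 1
    print(liouville_lambda(11))  # prime, one factor -> -1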
| 14 |
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
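# Quick check (illustrative): both helpers use Russian-peasant style doubling, so
# binary_multiply(a, b) == a * b, and the modular variant reduces at every step
# to keep intermediate values small.
if __name__ == "__main__":
    print(binary_multiply(13, 11))         # 143
    print(binary_mod_multiply(13, 11, 7))  # 143 % 7 == 3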
| 14 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _A ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase : List[Any] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __snake_case ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any]):
a : List[str] = TextaTextGenerationPipeline(model=_A , tokenizer=_A)
return generator, ["Something to write", "Something else"]
def __snake_case ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any):
a : Dict = generator("Something there")
self.assertEqual(_A , [{"generated_text": ANY(_A)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
a : Dict = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=_A)
self.assertEqual(
_A , [
[{"generated_text": ANY(_A)}, {"generated_text": ANY(_A)}],
[{"generated_text": ANY(_A)}, {"generated_text": ANY(_A)}],
] , )
a : List[Any] = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=_A)
self.assertEqual(
_A , [
[{"generated_text": ANY(_A)}, {"generated_text": ANY(_A)}],
[{"generated_text": ANY(_A)}, {"generated_text": ANY(_A)}],
] , )
with self.assertRaises(_A):
generator(4)
@require_torch
def __snake_case ( self : Optional[int]):
a : Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
# do_sample=False necessary for reproducibility
a : str = generator("Something there" , do_sample=_A)
self.assertEqual(_A , [{"generated_text": ""}])
a : Tuple = 3
a : int = generator(
"Something there" , num_return_sequences=_A , num_beams=_A , )
a : Optional[Any] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(_A , _A)
a : Dict = generator("This is a test" , do_sample=_A , num_return_sequences=2 , return_tensors=_A)
self.assertEqual(
_A , [
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
] , )
a : List[Any] = generator.model.config.eos_token_id
a : Optional[Any] = '<pad>'
a : Dict = generator(
["This is a test", "This is a second test"] , do_sample=_A , num_return_sequences=2 , batch_size=2 , return_tensors=_A , )
self.assertEqual(
_A , [
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
] , )
@require_tf
def __snake_case ( self : Union[str, Any]):
a : Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
# do_sample=False necessary for reproducibility
a : Tuple = generator("Something there" , do_sample=_A)
self.assertEqual(_A , [{"generated_text": ""}])
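# Usage sketch (illustrative): the pipeline under test, driven through the public
# API with the same tiny random checkpoint used in the tests above.
if __name__ == "__main__":
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    print(generator("Something there", do_sample=False))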
| 350 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "longformer"
    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : "PretrainedConfig" , __UpperCAmelCase : str = "default" , __UpperCAmelCase : "List[PatchingSpec]" = None):
super().__init__(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = True
@property
def __snake_case ( self : int):
if self.task == "multiple-choice":
a : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
a : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
])
@property
def __snake_case ( self : Union[str, Any]):
a : str = super().outputs
if self.task == "default":
a : Tuple = {0: "batch"}
return outputs
@property
def __snake_case ( self : Optional[int]):
return 1e-4
@property
def __snake_case ( self : str):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14)
def __snake_case ( self : List[str] , __UpperCAmelCase : "PreTrainedTokenizerBase" , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
a : str = super().generate_dummy_inputs(
preprocessor=__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase)
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
a : Union[str, Any] = torch.zeros_like(inputs["input_ids"])
# make every second token global
a : Dict = 1
return inputs
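# Usage sketch (illustrative): building the config through the public transformers
# API; `attention_window` and `max_position_embeddings` are real LongformerConfig
# parameters. The alias avoids shadowing the class defined above.
if __name__ == "__main__":
    from transformers import LongformerConfig as HubLongformerConfig

    config = HubLongformerConfig(attention_window=256, max_position_embeddings=4098)
    print(config.attention_window)  # 256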
| 226 | 0 |
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
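# Worked example (illustrative): with capacity 6, taking the items of weight 2
# and 4 (values 3 and 5) yields the optimum of 8.
if __name__ == "__main__":
    print(knapsack([1, 2, 4, 5], [1, 3, 5, 7], 4, 6, 0))  # 8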
| 30 |
def solution(n: int = 100) -> int:
    '''simple docstring'''
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"{solution() = }")
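# Worked example (illustrative) for n = 10: (1 + ... + 10)^2 = 55^2 = 3025 and
# 1^2 + ... + 10^2 = 385, so the difference is 3025 - 385 = 2640.
if __name__ == "__main__":
    print(solution(10))  # 2640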
| 30 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : List[Any] = logging.get_logger()
@dataclass
class snake_case :
__magic_name__ = 42
__magic_name__ = field(default_factory=UpperCAmelCase )
__magic_name__ = field(default_factory=UpperCAmelCase )
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__( self : List[Any] , A : Tensor ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A )
[x.remove() for x in self.handles]
return self
@property
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
return list(filter(lambda A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 1
__magic_name__ = field(default_factory=UpperCAmelCase )
__magic_name__ = field(default_factory=UpperCAmelCase )
__magic_name__ = True
def __call__( self : List[str] , A : Tensor ):
'''simple docstring'''
a : Any = Tracker(self.dest )(A ).parametrized
a : Any = Tracker(self.src )(A ).parametrized
a : Union[str, Any] = list(filter(lambda A : type(A ) not in self.src_skip , A ) )
a : Any = list(filter(lambda A : type(A ) not in self.dest_skip , A ) )
if len(A ) != len(A ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(A )} operations while'''
F''' destination module has {len(A )}.''' )
for dest_m, src_m in zip(A , A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class snake_case ( nn.Module ):
def __init__( self : Any , A : nn.Module ):
'''simple docstring'''
super().__init__()
a : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(('conv1', model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith('block' ), F'''Unexpected layer name {k}'''
a : int = len(A ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
a : Union[str, Any] = nn.ModuleDict(A )
def lowerCamelCase__ ( self : List[str] , A : Tensor ):
'''simple docstring'''
return get_trunk_forward_outputs(
A , out_feat_keys=A , feature_blocks=self._feature_blocks , )
class snake_case ( UpperCAmelCase ):
def lowerCamelCase__ ( self : List[Any] , A : str ):
'''simple docstring'''
a : List[Any] = x.split('-' )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Tuple , A : str ):
'''simple docstring'''
if x not in self:
a : str = self.convert_name_to_timm(A )
a : str = partial(lambda: (timm.create_model(A , pretrained=A ).eval(), None) )
else:
a : List[str] = super().__getitem__(A )
return val
class snake_case ( UpperCAmelCase ):
def __getitem__( self : Optional[int] , A : str ):
'''simple docstring'''
if "seer" in x and "in1k" not in x:
a : List[str] = RegNetModel
else:
a : Optional[int] = RegNetForImageClassification
return val
def snake_case (A_ :Union[str, Any] , A_ :Dict , A_ :List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
a : List[Any] = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def snake_case (A_ :str , A_ :Callable[[], nn.Module] , A_ :Callable[[], nn.Module] , A_ :RegNetConfig , A_ :Path , A_ :bool = True , ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
a : int = from_model_func()
a : List[str] = our_model_func(A_ ).eval()
a : List[Any] = ModuleTransfer(src=A_ , dest=A_ , raise_if_mismatch=A_ )
a : Tuple = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(A_ )
if from_state_dict is not None:
a : str = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
a : List[Any] = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')]
a : Optional[Any] = manually_copy_vissl_head(A_ , our_model.state_dict() , A_ )
our_model.load_state_dict(A_ )
a : Optional[int] = our_model(A_ , output_hidden_states=A_ )
a : List[Any] = (
our_outputs.logits if isinstance(A_ , A_ ) else our_outputs.last_hidden_state
)
a : Tuple = from_model(A_ )
a : Dict = from_output[-1] if type(A_ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
a : List[Any] = our_outputs.hidden_states[-1]
assert torch.allclose(A_ , A_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add model' , use_temp_dir=A_ , )
a : List[str] = 2_2_4 if 'seer' not in name else 3_8_4
# we can use the convnext one
a : Union[str, Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' , size=A_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message='Add image processor' , use_temp_dir=A_ , )
print(f'''Pushed {name}''' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    '''simple docstring'''
a : Union[str, Any] = 'imagenet-1k-id2label.json'
a : Dict = 1_0_0_0
a : int = (1, num_labels)
a : str = 'huggingface/label-files'
a : Dict = num_labels
a : Optional[Any] = json.load(open(cached_download(hf_hub_url(A_ , A_ , repo_type='dataset' ) ) , 'r' ) )
a : Optional[int] = {int(A_ ): v for k, v in idalabel.items()}
a : int = idalabel
a : List[Any] = {v: k for k, v in idalabel.items()}
a : int = partial(A_ , num_labels=A_ , idalabel=A_ , labelaid=A_ )
    names_to_config = {
'regnet-x-002': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='x' ),
'regnet-x-004': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='x' ),
'regnet-x-006': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='x' ),
'regnet-x-008': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='x' ),
'regnet-x-016': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='x' ),
'regnet-x-032': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='x' ),
'regnet-x-040': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='x' ),
'regnet-x-064': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='x' ),
'regnet-x-080': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='x' ),
'regnet-x-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='x' ),
'regnet-x-160': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='x' ),
'regnet-x-320': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='x' ),
# y variant
'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'regnet-y-004': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'regnet-y-006': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'regnet-y-008': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'regnet-y-016': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'regnet-y-032': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'regnet-y-040': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'regnet-y-064': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'regnet-y-080': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'regnet-y-120': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'regnet-y-160': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'regnet-y-320': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()))
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()))
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()))
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))))
if model_name:
convert_weight_and_push(
A_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , A_ , A_ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
A_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , A_ , A_ , A_ , )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
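# Example invocation (illustrative; the script filename and dump path are
# assumptions, the flags are defined above):
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-dump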
| 360 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186 | 0 |
"""simple docstring"""
def capitalize_characters(txt: str) -> list:
    '''simple docstring'''
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
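# Quick demo (illustrative; the function name above was chosen for this rewrite):
if __name__ == "__main__":
    print(capitalize_characters("abc"))  # ['Abc', 'aBc', 'abC']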
| 72 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = old_name
if "patch_embed" in old_name:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = old_name.split("." )
if layer == "0":
_UpperCAmelCase : List[str] = old_name.replace("0" , "convolution1" )
elif layer == "1":
_UpperCAmelCase : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_UpperCAmelCase : Tuple = old_name.replace("3" , "convolution2" )
else:
_UpperCAmelCase : Tuple = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = R"\b\d{2}\b"
if bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ):
_UpperCAmelCase : Optional[int] = re.search(R"\d\.\d\d." , __lowerCAmelCase ).group()
else:
_UpperCAmelCase : Any = re.search(R"\d\.\d." , __lowerCAmelCase ).group()
if int(match[0] ) < 6:
_UpperCAmelCase : str = old_name.replace(__lowerCAmelCase , "" )
_UpperCAmelCase : Optional[Any] = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_UpperCAmelCase : Union[str, Any] = "intermediate_stages." + trimmed_name
else:
_UpperCAmelCase : Tuple = old_name.replace(__lowerCAmelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
_UpperCAmelCase : Any = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_UpperCAmelCase : List[str] = str(int(match[2] ) - num_meta4D_last_stage )
_UpperCAmelCase : int = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_UpperCAmelCase : Tuple = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_UpperCAmelCase : int = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_UpperCAmelCase : Optional[int] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_UpperCAmelCase : List[str] = trimmed_name.replace("fc2" , "linear_out" )
_UpperCAmelCase : Optional[Any] = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_UpperCAmelCase : Union[str, Any] = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_UpperCAmelCase : List[Any] = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_UpperCAmelCase : List[Any] = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_UpperCAmelCase : Union[str, Any] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_UpperCAmelCase : List[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_UpperCAmelCase : str = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_UpperCAmelCase : List[str] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_UpperCAmelCase : List[Any] = new_name.replace("norm" , "layernorm" )
_UpperCAmelCase : Any = "efficientformer." + new_name
else:
_UpperCAmelCase : Dict = "efficientformer.encoder." + new_name
return new_name
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key in checkpoint.copy().keys():
_UpperCAmelCase : List[Any] = checkpoint.pop(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = val
return checkpoint
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = torch.load(__lowerCAmelCase , map_location="cpu" )["model"]
_UpperCAmelCase : Dict = EfficientFormerConfig.from_json_file(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(__lowerCAmelCase )
_UpperCAmelCase : Tuple = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
_UpperCAmelCase : Union[str, Any] = config.depths[-1] - config.num_metaad_blocks + 1
_UpperCAmelCase : Optional[int] = convert_torch_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
_UpperCAmelCase : Optional[Any] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : List[str] = 256
_UpperCAmelCase : Optional[int] = 224
_UpperCAmelCase : Tuple = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_UpperCAmelCase : Any = processor(images=__lowerCAmelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
_UpperCAmelCase : int = Compose(
[
Resize(__lowerCAmelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
Normalize(__lowerCAmelCase , __lowerCAmelCase ),
] )
_UpperCAmelCase : Any = image_transforms(__lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = model(__lowerCAmelCase )
_UpperCAmelCase : Dict = outputs.logits
_UpperCAmelCase : Optional[int] = (1, 1_000)
if "l1" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_UpperCAmelCase : List[Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(__lowerCAmelCase)
    print(F"""Processor successfully saved at {pytorch_dump_path}""")
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
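# Example invocation (illustrative; the script filename and paths are assumptions,
# the flags are defined above):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path ./efficientformer_l1.pth \
#       --config_file ./efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --push_to_hub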
| 234 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_A : Optional[int] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Union[str, Any] , A : bool = True , A : Dict[str, int] = None , A : PILImageResampling = PILImageResampling.BICUBIC , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_5_5 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : bool = True , **A : List[str] , ):
super().__init__(**A )
lowerCamelCase__ : str = size if size is not None else {'''shortest_edge''': 2_2_4}
lowerCamelCase__ : List[str] = get_size_dict(A , default_to_square=A )
lowerCamelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCamelCase__ : Dict = get_size_dict(A , default_to_square=A , param_name='''crop_size''' )
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : Optional[int] = size
lowerCamelCase__ : List[str] = resample
lowerCamelCase__ : Tuple = do_center_crop
lowerCamelCase__ : Tuple = crop_size
lowerCamelCase__ : List[Any] = do_rescale
lowerCamelCase__ : str = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase__ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase__ : int = do_convert_rgb
def __lowerCamelCase ( self : int , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Any , ):
lowerCamelCase__ : Union[str, Any] = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCamelCase__ : Optional[Any] = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def __lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : str , ):
lowerCamelCase__ : Optional[Any] = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A )
def __lowerCamelCase ( self : Optional[Any] , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ):
return rescale(A , scale=A , data_format=A , **A )
def __lowerCamelCase ( self : Optional[int] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def __lowerCamelCase ( self : Optional[Any] , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : int = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : bool = None , A : Optional[Union[str, TensorType]] = None , A : Optional[ChannelDimension] = ChannelDimension.FIRST , **A : Optional[int] , ):
lowerCamelCase__ : Any = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Any = size if size is not None else self.size
lowerCamelCase__ : Union[str, Any] = get_size_dict(A , param_name='''size''' , default_to_square=A )
lowerCamelCase__ : Union[str, Any] = resample if resample is not None else self.resample
lowerCamelCase__ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Any = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : List[str] = get_size_dict(A , param_name='''crop_size''' , default_to_square=A )
lowerCamelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : str = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase__ : Dict = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase__ : Any = [convert_to_rgb(A ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase__ : Tuple = [to_numpy_array(A ) for image in images]
if do_resize:
lowerCamelCase__ : Optional[Any] = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
lowerCamelCase__ : List[Any] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
lowerCamelCase__ : Dict = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
lowerCamelCase__ : Dict = [self.normalize(image=A , mean=A , std=A ) for image in images]
lowerCamelCase__ : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
lowerCamelCase__ : str = {'''pixel_values''': images}
return BatchFeature(data=A , tensor_type=A )
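# Minimal usage sketch (file name is a placeholder; assumes PIL is available):
#   processor = __SCREAMING_SNAKE_CASE()  # defaults: resize shortest edge to 224, center crop 224x224
#   batch = processor.preprocess(PIL.Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)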
| 355 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        outputs = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        outputs = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 265 | 0 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """
    Liouville's lambda function: returns -1 if `number` has an odd number of prime
    factors (counted with multiplicity) and +1 if it has an even number.
    """
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
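    # Example values, assuming `prime_factors` counts multiplicity:
    #   liouville_lambda(10) == 1    # 10 = 2 * 5 -> two factors (even)
    #   liouville_lambda(12) == -1   # 12 = 2 * 2 * 3 -> three factors (odd)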
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
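# Note: with `_import_structure` registered in a `_LazyModule`, a statement such as
# `from transformers.models.vit_msn import ViTMSNModel` defers the heavy torch import
# until the attribute is actually accessed.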
| 14 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if ``use_pascal``)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
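    # Expected behaviour of the converter above:
    #   snake_to_camel_case("some_random_string")                  -> "someRandomString"
    #   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"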
| 60 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
SCREAMING_SNAKE_CASE :Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
SCREAMING_SNAKE_CASE :Optional[int] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
SCREAMING_SNAKE_CASE :Tuple = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 60 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 83 |
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=10_00)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'''Half Adder Output Qubit Counts: {counts}''')
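    # For inputs (1, 1) the ideal histogram is {'10': 1000}: the two classical bits
    # read (carry, sum) = (1, 0), and Qiskit prints clbit 1 before clbit 0.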
| 226 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV link via downloadgram and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 363 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["""XLA_PYTHON_CLIENT_MEM_FRACTION"""] = """0.12"""  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
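# e.g. ids_tensor((2, 5), vocab_size=99) -> a (2, 5) int32 array with entries in [0, 98]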
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 1_0
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')

        encoder_input_str = 'Hello world'
        input_ids = tokenizer(encoder_input_str, return_tensors='np').input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids, **fake_model_kwargs)
| 254 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', "stage2.cls_token") )
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
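# Example invocation (script name and paths are placeholders):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py --cvt_model cvt-13 \
#       --image_size 384 --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-13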
| 148 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_mobilevit"""] = ["""MobileViTFeatureExtractor"""]
    _import_structure["""image_processing_mobilevit"""] = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mobilevit"""] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_mobilevit"""] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 186 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    config_parameters_to_change = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
snake_case__ = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
snake_case__ = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
snake_case__ = reader.read()
snake_case__ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
snake_case__ = UNetaDModel(**config)
else:
snake_case__ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
snake_case__ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
snake_case__ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
snake_case__ = config[key]
del config[key]
snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
snake_case__ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
snake_case__ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
snake_case__ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
snake_case__ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
snake_case__ = param_value
snake_case__ = True
if not has_changed:
snake_case__ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
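# Example invocation (hypothetical script and repo names, shown only for illustration):
#   python convert_unet_naming.py --repo_path ./ddpm-cifar10-32 --dump_path ./converted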
| 4 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
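    """
    Given a list of integers sorted in ascending order, return the indices of the
    two numbers that add up to ``target`` using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 26)
    [2, 3]
    >>> two_pointer([1, 2, 3], 7)
    []
    """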
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 4 | 1 |
"""simple docstring"""
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 108 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
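    """
    Change the brightness of a PIL Image by adding ``level`` to every pixel value.
    ``level`` must lie in [-255.0, 255.0]; negative values darken the image and
    positive values brighten it.
    """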
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
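# Aside (not part of the original script): ``Image.point`` applies the mapping
# function to every 8-bit channel value and clamps the result to [0, 255]. An
# equivalent lookup-table formulation for an RGB image would be:
#   lut = [max(0, min(255, 128 + 100 + (c - 128))) for c in range(256)]
#   brighter = img.point(lut * 3)  # one copy of the table per band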
| 265 | 0 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
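    # NOTE: ".eZt8xd" appears to be an auto-generated class name on Google's
    # result page; it can change at any time, so this selector is brittle.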
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f'''https://google.com{link.get("href")}''')
| 143 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510"
        )
| 143 | 1 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
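# Worked illustration (not part of the original module): corpus BLEU combines the
# modified n-gram precisions p_n with a brevity penalty BP = min(1, exp(1 - r/c)),
# where c is the candidate length and r the effective reference length:
#
#     import math
#     p = [3 / 4, 1 / 3]        # assumed unigram/bigram precisions
#     c, r = 4, 5               # assumed candidate/reference lengths (c < r)
#     bp = math.exp(1 - r / c)  # brevity penalty, < 1 because c < r
#     bleu = bp * math.exp(sum(math.log(x) for x in p) / len(p))  # uniform weights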
| 60 |
"""simple docstring"""
def remove_digit(num: int) -> int:
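    """
    Return the biggest number that can be produced by removing exactly one
    digit from ``num``.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    >>> remove_digit(2222222)
    222222
    """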
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 60 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
| 355 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
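    """Greedily concatenate consecutive (src, tgt) example pairs as long as both the
    packed source and the packed target stay under ``max_tokens`` tokens, starting a
    new packed example whenever the next pair would no longer fit."""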
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 255 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 73 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 254 | 0 |
import re
def split_input(str_: str) -> list:
    """Split the input on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
| 266 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
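    """Return the product u (u - 1) (u - 2) ... (u - p + 1), the u-term of the
    Newton forward-difference interpolation formula."""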
"""simple docstring"""
a__ : Union[str, Any] = u
for i in range(1 , _lowercase):
a__ : Optional[int] = temp * (u - i)
return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 266 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 4 |
'''simple docstring'''
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 4 | 1 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
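    """
    Determine the missing carrier concentration in a semiconductor from the
    mass action law n * p = n_i**2: pass 0 for exactly one of the three
    arguments and that quantity is computed from the other two.
    """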
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)

        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 4 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text

    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(' ', ''), output_text)

    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 143 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
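# Hypothetical usage sketch (not part of the original module): a concrete command
# implements both hooks, e.g.
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             sub = parser.add_parser("hello")  # assumes `parser` is a subparsers action
#             sub.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")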
| 143 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Computes WER/CER on the mapped dataset and optionally logs predictions/targets to files."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Normalizes a transcription so it matches the training-time preprocessing."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    args = parser.parse_args()

    main(args)
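
# Example invocation (illustrative; the script name, model and dataset ids are
# assumptions, not taken from this file):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs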
| 292 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # the model returns the mean token cross-entropy; multiplying by the label
        # length recovers the summed negative log-likelihood of the sequence
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 292 | 1 |
"""simple docstring"""
A: Union[str, Any] = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A: Dict = frozenset(["image"])
A: Dict = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A: Union[str, Any] = frozenset(["image", "mask_image"])
A: Dict = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A: List[str] = frozenset(["example_image", "image", "mask_image"])
A: Any = frozenset(["class_labels"])
A: Optional[int] = frozenset(["class_labels"])
A: int = frozenset(["batch_size"])
A: Union[str, Any] = frozenset([])
A: Union[str, Any] = frozenset(["batch_size"])
A: int = frozenset([])
A: List[Any] = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A: Any = frozenset(["prompt", "negative_prompt"])
A: List[str] = frozenset(["input_tokens"])
A: str = frozenset(["input_tokens"])
| 109 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: returns the maximum sum over all contiguous subarrays of `arr`."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
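

# Example (illustrative): max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# returns 6, from the subarray [4, -1, 2, 1]. With allow_empty_subarrays=True,
# an all-negative input yields 0 (the empty subarray) instead of its largest element.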
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f'{max_subarray_sum(nums) = }')
| 255 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='moussaKam/mbarthez', revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6', sequences=sequences, )
| 122 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
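
# Lazy-module note: attribute access on this package triggers the real import,
# so the torch-heavy modeling file is only loaded once a FocalNet class is used.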
| 122 | 1 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    # manim Scenes draw themselves in `construct`; the class name Stage1 is an
    # assumption based on the Accelerate docs animations this file mirrors.
    def construct(self):
__A = Rectangle(height=0.5, width=0.5 )
__A = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
__A = [mem.copy() for i in range(6 )]
__A = [mem.copy() for i in range(6 )]
__A = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase, buff=0 )
__A = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase, buff=0 )
__A = VGroup(_lowerCamelCase, _lowerCamelCase ).arrange(_lowerCamelCase, buff=0 )
__A = Text('''CPU''', font_size=24 )
__A = Group(_lowerCamelCase, _lowerCamelCase ).arrange(_lowerCamelCase, buff=0.5, aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
__A = [mem.copy() for i in range(1 )]
__A = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase, buff=0 )
__A = Text('''GPU''', font_size=24 )
__A = Group(_lowerCamelCase, _lowerCamelCase ).arrange(_lowerCamelCase, buff=0.5, aligned_edge=_lowerCamelCase )
gpu.align_to(_lowerCamelCase, _lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCamelCase )
__A = [mem.copy() for i in range(6 )]
__A = VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase, buff=0 )
__A = Text('''Model''', font_size=24 )
__A = Group(_lowerCamelCase, _lowerCamelCase ).arrange(_lowerCamelCase, buff=0.5, aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCamelCase, run_time=1 ), Create(_lowerCamelCase, run_time=1 ), Create(_lowerCamelCase, run_time=1 ), )
__A = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.', font_size=24, )
__A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__A = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model', font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase, run_time=2.5 ), Write(_lowerCamelCase ), Write(_lowerCamelCase ) )
self.add(_lowerCamelCase )
__A = []
__A = []
__A = []
for i, rect in enumerate(_lowerCamelCase ):
__A = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase, opacity=0.7 )
cpu_target.move_to(_lowerCamelCase )
cpu_target.generate_target()
__A = 0.46 / 4
__A = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=_lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=_lowerCamelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=_lowerCamelCase, buff=0.0 )
cpu_targs.append(_lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCamelCase ) )
second_animations.append(MoveToTarget(_lowerCamelCase, run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 266 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
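

# Sketch of intended use (an assumption): example tests swap their real MRPC
# dataloader factory for this mock so CI runs on the two bundled csv files, e.g.
#   accelerator = Accelerator()
#   train_dl, eval_dl = mocked_dataloaders(accelerator, batch_size=16)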
| 266 | 1 |
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort; returns a new sorted list."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted result."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
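

# Example (illustrative): merge_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5];
# merge_sort([]) -> [] and merge_sort([-2, -5, -45]) -> [-45, -5, -2].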
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 260 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Returns True if no queen already on the board attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Places queens row by row, printing every complete solution found."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 260 | 1 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """A* search over a 0/1 obstacle grid; returns the path and the action grid."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
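

# Note: with the Manhattan-distance heuristic built below, `search` behaves as
# textbook A*; setting the heuristic to all zeros would reduce it to Dijkstra.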
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 10 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Non-preemptive shortest-job-first: returns the waiting time of each process."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i

            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time of a process = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
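

# Worked example (matches the test case below): burst = [2, 5, 3, 7] with all
# arrivals at 0 runs the jobs in order P1, P3, P2, P4, giving waiting times
# [0, 5, 2, 10] and turnaround times [2, 10, 5, 17].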
if __name__ == "__main__":
print("""[TEST CASE 01]""")
__snake_case =4
__snake_case =[2, 5, 3, 7]
__snake_case =[0, 0, 0, 0]
__snake_case =calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__snake_case =calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 4 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-350m""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-1.6b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-6.7b""": 2_0_4_8,
"""AI-Sweden/gpt-sw3-20b""": 2_0_4_8,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCAmelCase = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__lowerCAmelCase = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token
__lowerCAmelCase = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowerCAmelCase = unk_token if pad_token is None else pad_token
__lowerCAmelCase = eos_token if bos_token is None else bos_token
else:
__lowerCAmelCase = """<pad>""" if pad_token is None else pad_token
__lowerCAmelCase = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__SCREAMING_SNAKE_CASE,remove_space=__SCREAMING_SNAKE_CASE,keep_accents=__SCREAMING_SNAKE_CASE,bos_token=__SCREAMING_SNAKE_CASE,eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = remove_space
__lowerCAmelCase = keep_accents
__lowerCAmelCase = vocab_file
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
# Used for whitespace normalization in input texts
        # fmt: off
__lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__lowerCAmelCase = re.compile(
f'[{"".join(map(__SCREAMING_SNAKE_CASE,list(range(0,9 ) ) + list(range(11,32 ) ) + list(range(1_27,1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' )
def __getstate__( self ):
'''simple docstring'''
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self,"""sp_model_kwargs""" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase__ ( self ):
'''simple docstring'''
return len(self.sp_model )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.non_printing_characters_re.sub("""""",__SCREAMING_SNAKE_CASE )
# Normalize whitespaces
__lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__lowerCAmelCase = unicodedata.normalize("""NFC""",__SCREAMING_SNAKE_CASE )
return text
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
return self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return out_string
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = """"""
__lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__lowerCAmelCase = True
__lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = self.preprocess_text(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = [self.preprocess_text(__SCREAMING_SNAKE_CASE ) for t in text]
__lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE )
if return_tensors is True or return_tensors == "pt":
__lowerCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE )
return token_ids
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.sp_model.decode(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
__lowerCAmelCase = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__SCREAMING_SNAKE_CASE ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__SCREAMING_SNAKE_CASE )
| 46 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 46 | 1 |
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 292 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 292 | 1 |
def solution(n: int = 1000000) -> int:
    """Returns the starting number below `n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
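

# Known result (Project Euler 14): solution(1_000_000) returns 837799, whose
# Collatz chain has 525 terms.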
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 66 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
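

# Example (illustrative; the token ids assume the distilbert-base-uncased vocab,
# where 101 = [CLS], 102 = [SEP] and 7592 = "hello"):
#   tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tok.build_inputs_with_special_tokens([7592])        # [101, 7592, 102]
#   tok.create_token_type_ids_from_sequences([7592])    # [0, 0, 0]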
| 66 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
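    # With the lazy module installed in sys.modules, heavy submodules are only
    # imported on first attribute access; e.g. touching GPTBigCodeModel is what
    # actually triggers the torch-backed import, not importing the package.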
| 122 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of the given length by filling digit pairs from the outside in."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
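# Worked example of the property being counted: 36 + 63 = 99, whose digits are
# all odd, so 36 (and 63) are reversible. There are 120 reversible numbers
# below one thousand, i.e. solution(3) == 120.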
| 122 | 1 |
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 360 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = dataset
UpperCAmelCase__ : Union[str, Any] = process
UpperCAmelCase__ : List[Any] = params
def __len__(self ):
"""simple docstring"""
return len(self.dataset )
def __getitem__(self , _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = self.dataset[i]
UpperCAmelCase__ : Any = self.process(_lowerCamelCase , **self.params )
return processed
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = loader
UpperCAmelCase__ : int = infer
UpperCAmelCase__ : Optional[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Any = loader_batch_size
# Internal bookkeeping
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : Union[str, Any] = None
def __len__(self ):
"""simple docstring"""
return len(self.loader )
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = iter(self.loader )
return self
def _a (self ):
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
UpperCAmelCase__ : Optional[int] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCAmelCase__ : List[str] = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
# Convert ModelOutput to tuple first
UpperCAmelCase__ : List[Any] = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase__ : str = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase__ : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCamelCase , _lowerCamelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase__ : Optional[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase__ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
UpperCAmelCase__ : str = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
UpperCAmelCase__ : Tuple = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
UpperCAmelCase__ : Dict = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCAmelCase__ : Optional[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCAmelCase__ : Union[str, Any] = self._loader_batch_data.__class__(_lowerCamelCase )
self._loader_batch_index += 1
return result
def _a (self ):
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCAmelCase__ : str = next(self.iterator )
UpperCAmelCase__ : Union[str, Any] = self.infer(_lowerCamelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCamelCase , torch.Tensor ):
UpperCAmelCase__ : List[Any] = processed
else:
UpperCAmelCase__ : List[str] = list(processed.keys() )[0]
UpperCAmelCase__ : List[str] = processed[key]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : Any = len(_lowerCamelCase )
else:
UpperCAmelCase__ : Union[str, Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : Optional[int] = observed_batch_size
# Setting internal index to unwrap the batch
UpperCAmelCase__ : List[Any] = processed
UpperCAmelCase__ : Optional[int] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
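# Note on the unrolling above: if the underlying DataLoader yields batches of
# size N, this iterator re-emits N items whose tensors keep a leading dimension
# of 1, so downstream consumers can always assume batch_size=1.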
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
"""simple docstring"""
super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = iter(self.loader )
UpperCAmelCase__ : List[Any] = None
return self
def _a (self ):
"""simple docstring"""
if self.subiterator is None:
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCAmelCase__ : List[str] = next(self.subiterator )
except StopIteration:
                # When a preprocess iterator ends, we can start looking at the next item.
                # ChunkIterator will keep feeding until ALL elements of the iterator
                # have created their subiterator and have been iterated against.
                #
                # Another way to look at it: we're basically flattening lists of lists
                # into a single list, but with generators.
UpperCAmelCase__ : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
UpperCAmelCase__ : List[str] = next(self.subiterator )
return processed
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __iter__(self ):
"""simple docstring"""
UpperCAmelCase__ : str = iter(self.loader )
return self
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : List[Any] = self.loader_batch_item()
UpperCAmelCase__ : Dict = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
if is_last:
return accumulator
while not is_last:
UpperCAmelCase__ : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCamelCase , torch.Tensor ):
UpperCAmelCase__ : Dict = processed
else:
UpperCAmelCase__ : List[Any] = list(processed.keys() )[0]
UpperCAmelCase__ : List[Any] = processed[key]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCAmelCase__ : int = len(_lowerCamelCase )
else:
UpperCAmelCase__ : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase__ : str = observed_batch_size
UpperCAmelCase__ : Union[str, Any] = processed
UpperCAmelCase__ : List[Any] = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase__ : Union[str, Any] = self.loader_batch_item()
UpperCAmelCase__ : int = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
if is_last:
return accumulator
else:
UpperCAmelCase__ : Any = processed
UpperCAmelCase__ : Any = item.pop("""is_last""" )
accumulator.append(_lowerCamelCase )
return accumulator
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    def __init__(self, dataset, key):
        """simple docstring"""
        self.dataset = dataset
        self.key = key

    def __len__(self):
        """simple docstring"""
        return len(self.dataset)

    def __getitem__(self, i):
        """simple docstring"""
        return self.dataset[i][self.key]
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
    def __init__(self, dataset, key1, key2):
        """simple docstring"""
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        """simple docstring"""
        return len(self.dataset)

    def __getitem__(self, i):
        """simple docstring"""
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 166 | 0 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__A : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase)
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : int , *__UpperCamelCase : str , **__UpperCamelCase : List[Any] )->Optional[int]:
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
requires_backends(self , '''decord''' )
self.check_model_type(__UpperCamelCase )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : int=None , __UpperCamelCase : Optional[Any]=None )->Any:
_UpperCAmelCase = {}
if frame_sampling_rate is not None:
_UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
_UpperCAmelCase = num_frames
_UpperCAmelCase = {}
if top_k is not None:
_UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : int , __UpperCamelCase : Union[str, List[str]] , **__UpperCamelCase : List[str] )->Union[str, Any]:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=None , __UpperCamelCase : List[Any]=1 )->Tuple:
if num_frames is None:
_UpperCAmelCase = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
_UpperCAmelCase = BytesIO(requests.get(__UpperCamelCase ).content )
_UpperCAmelCase = VideoReader(__UpperCamelCase )
videoreader.seek(0 )
_UpperCAmelCase = 0
_UpperCAmelCase = num_frames * frame_sampling_rate - 1
        _UpperCAmelCase = np.linspace(__UpperCamelCase , __UpperCamelCase , num=__UpperCamelCase , dtype=np.int64 )
_UpperCAmelCase = videoreader.get_batch(__UpperCamelCase ).asnumpy()
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=self.framework )
return model_inputs
def lowercase__ ( self : List[str] , __UpperCamelCase : str )->Optional[int]:
_UpperCAmelCase = self.model(**__UpperCamelCase )
return model_outputs
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int]=5 )->int:
if top_k > self.model.config.num_labels:
_UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
_UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
_UpperCAmelCase , _UpperCAmelCase = probs.topk(__UpperCamelCase )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
_UpperCAmelCase = scores.tolist()
_UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCamelCase , __UpperCamelCase )]
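# Rough usage sketch (checkpoint name assumed for illustration):
#   from transformers import pipeline
#   video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
#   video_classifier("https://example.com/archery.mp4", top_k=3)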
| 260 |
"""simple docstring"""
def pancake_sort(arr):
    """Sort a list using only prefix reversals (pancake flips)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip the prefix arr[0:mi + 1] so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the prefix arr[0:cur] so the maximum moves to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
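# Example: pancake_sort([3, 1, 2]) returns [1, 2, 3]; each pass uses at most
# two flips and a flip costs O(n), so the overall running time is O(n^2).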
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 260 | 1 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=32 , a__=3 , a__=4 , a__=[10, 20, 30, 40] , a__=[2, 2, 3, 2] , a__=True , a__=True , a__=37 , a__="gelu" , a__=10 , a__=0.0_2 , a__=["stage2", "stage3", "stage4"] , a__=3 , a__=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Dict = num_stages
_lowerCAmelCase : Dict = hidden_sizes
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Dict = out_features
_lowerCAmelCase : int = num_labels
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : Optional[int] = num_stages
def __A ( self ):
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __A ( self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=a__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=a__ , loss_ignore_index=255 , num_labels=self.num_labels , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = UperNetForSemanticSegmentation(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
_lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_UpperCamelCase : Tuple = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
_UpperCamelCase : List[str] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
_lowerCAmelCase : Any = UperNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
_lowerCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a__ )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def __A ( self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def __A ( self ):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def __A ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __A ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __A ( self ):
pass
def __A ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[Any] = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Tuple = True
check_hidden_states_output(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = _config_zero_init(a__ )
_lowerCAmelCase : Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(config=a__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def __A ( self ):
pass
@slow
def __A ( self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = UperNetForSemanticSegmentation.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" ,repo_type="""dataset""" ,filename="""ADE_val_00000001.jpg""" )
_lowerCAmelCase : Any = Image.open(_lowerCamelCase ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
_lowerCAmelCase : str = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a__ )
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Optional[Any] = processor(images=a__ , return_tensors="""pt""" ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Any = model(**a__ )
_lowerCAmelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : List[str] = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a__ , atol=1e-4 ) )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
_lowerCAmelCase : Dict = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a__ )
_lowerCAmelCase : List[str] = prepare_img()
_lowerCAmelCase : Tuple = processor(images=a__ , return_tensors="""pt""" ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**a__ )
_lowerCAmelCase : Optional[int] = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Tuple = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , a__ , atol=1e-4 ) )
| 369 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A :
def __init__( self , a__ , a__=2 , a__=3 , a__=4 , a__=2 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=36 , a__=3 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=6 , a__=6 , a__=3 , a__=4 , a__=None , a__=1000 , ):
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : str = image_size
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : str = text_seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Dict = coordinate_size
_lowerCAmelCase : Optional[int] = shape_size
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : Optional[int] = text_seq_length
_lowerCAmelCase : Any = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : Any = self.text_seq_length + self.image_seq_length
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Optional[Any] = bbox[i, j, 3]
_lowerCAmelCase : List[str] = bbox[i, j, 1]
_lowerCAmelCase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : int = bbox[i, j, 2]
_lowerCAmelCase : Optional[int] = bbox[i, j, 0]
_lowerCAmelCase : Optional[int] = t
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCAmelCase : int = None
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCAmelCase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = LayoutLMvaModel(config=a__ )
model.to(a__ )
model.eval()
# text + image
_lowerCAmelCase : Optional[Any] = model(a__ , pixel_values=a__ )
_lowerCAmelCase : Any = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : List[Any] = model(a__ , bbox=a__ , pixel_values=a__ , token_type_ids=a__ )
_lowerCAmelCase : Tuple = model(a__ , bbox=a__ , pixel_values=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase : Optional[Any] = model(pixel_values=a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = LayoutLMvaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : str = LayoutLMvaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[str] = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = LayoutLMvaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
_UpperCamelCase : int = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __A ( self ):
_lowerCAmelCase : Any = LayoutLMvaModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self , a__ , a__ , a__=False ):
_lowerCAmelCase : List[str] = copy.deepcopy(a__ )
if model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a__ ):
_lowerCAmelCase : List[Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
_lowerCAmelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a__ , )
return inputs_dict
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __A ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LayoutLMvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return LayoutLMvaImageProcessor(apply_ocr=a__ ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values.to(a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2]] )
_lowerCAmelCase : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase : str = model(
input_ids=input_ids.to(a__ ) , bbox=bbox.to(a__ ) , pixel_values=pixel_values.to(a__ ) , )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ) )
| 126 | 0 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase = nn.functional.normalize(SCREAMING_SNAKE_CASE )
lowerCAmelCase = nn.functional.normalize(SCREAMING_SNAKE_CASE )
return torch.mm(SCREAMING_SNAKE_CASE , normalized_text_embeds.t() )
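# Note: despite the name, this returns the cosine *similarity* matrix; entry
# [i, j] is the dot product of the L2-normalized image embedding i and the
# concept embedding j, so values lie in [-1, 1] and larger means more similar.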
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = CLIPConfig
_SCREAMING_SNAKE_CASE = ['CLIPEncoderLayer']
def __init__( self , lowercase ) -> Optional[int]:
super().__init__(lowercase )
lowerCAmelCase = CLIPVisionModel(config.vision_config )
lowerCAmelCase = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase )
lowerCAmelCase = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase )
lowerCAmelCase = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase )
lowerCAmelCase = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase )
lowerCAmelCase = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase )
@torch.no_grad()
def _snake_case ( self , lowercase , lowercase ) -> Optional[Any]:
lowerCAmelCase = self.vision_model(lowercase )[1] # pooled_output
lowerCAmelCase = self.visual_projection(lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCAmelCase = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy()
lowerCAmelCase = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy()
lowerCAmelCase = []
lowerCAmelCase = image_embeds.shape[0]
for i in range(lowercase ):
lowerCAmelCase = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowerCAmelCase = special_cos_dist[i][concept_idx]
lowerCAmelCase = self.special_care_embeds_weights[concept_idx].item()
lowerCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]} )
lowerCAmelCase = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
lowerCAmelCase = cos_dist[i][concept_idx]
lowerCAmelCase = self.concept_embeds_weights[concept_idx].item()
lowerCAmelCase = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowercase )
result.append(lowercase )
lowerCAmelCase = [len(res["""bad_concepts"""] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _snake_case ( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCAmelCase = self.vision_model(lowercase )[1] # pooled_output
lowerCAmelCase = self.visual_projection(lowercase )
lowerCAmelCase = cosine_distance(lowercase , self.special_care_embeds )
lowerCAmelCase = cosine_distance(lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowerCAmelCase = 0.0
lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowerCAmelCase = torch.any(special_scores > 0 , dim=1 )
lowerCAmelCase = special_care * 0.01
lowerCAmelCase = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowerCAmelCase = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowerCAmelCase = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
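# Rough usage sketch (names assumed for illustration; in diffusers this module
# is StableDiffusionSafetyChecker and a pipeline calls it after decoding):
#   images, has_nsfw = safety_checker(clip_input=clip_pixel_values, images=decoded_images)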
| 46 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'imagegpt'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , lowercase=512 + 1 , lowercase=32 * 32 , lowercase=512 , lowercase=24 , lowercase=8 , lowercase=None , lowercase="quick_gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , **lowercase , ) -> Any:
lowerCAmelCase = vocab_size
lowerCAmelCase = n_positions
lowerCAmelCase = n_embd
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = n_inner
lowerCAmelCase = activation_function
lowerCAmelCase = resid_pdrop
lowerCAmelCase = embd_pdrop
lowerCAmelCase = attn_pdrop
lowerCAmelCase = layer_norm_epsilon
lowerCAmelCase = initializer_range
lowerCAmelCase = scale_attn_weights
lowerCAmelCase = use_cache
lowerCAmelCase = scale_attn_by_inverse_layer_idx
lowerCAmelCase = reorder_and_upcast_attn
lowerCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase , **lowercase )
class lowercase ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _snake_case ( self , lowercase , lowercase = 1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 32 , lowercase = 32 , ) -> Mapping[str, Any]:
lowerCAmelCase = self._generate_dummy_images(lowercase , lowercase , lowercase , lowercase )
lowerCAmelCase = dict(preprocessor(images=lowercase , return_tensors=lowercase ) )
return inputs
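# Minimal sketch of producing ONNX dummy inputs (class and preprocessor names
# assumed for illustration):
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   dummy = onnx_config.generate_dummy_inputs(image_processor, batch_size=1)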
| 46 | 1 |
"""simple docstring"""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: follow the chord through (x_n, f(x_n)) and (x_n1, f(x_n1))
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
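# The real root of f(x) = x**3 - 2*x - 5 is approximately 2.0945514815, so the
# call above should print a value very close to that.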
| 361 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def a__ ( lowerCAmelCase ) -> Tuple:
UpperCAmelCase__ : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
UpperCAmelCase__ : Dict = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
UpperCAmelCase__ : int = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase__ : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase__ : Union[str, Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(lowerCAmelCase )-1}""" )
if "norm" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase__ : int = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
UpperCAmelCase__ : Union[str, Any] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(lowerCAmelCase )-1}""" )
if "layer_norm1" in key:
UpperCAmelCase__ : Any = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase__ : int = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase__ : List[Any] = key.replace(F"""block{idx}""" , F"""block.{int(lowerCAmelCase )-1}""" )
if "attn.q" in key:
UpperCAmelCase__ : List[Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase__ : Tuple = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase__ : int = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase__ : List[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase__ : Optional[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase__ : List[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase__ : int = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(lowerCAmelCase )-1}""" )
if "bot_conv" in key:
UpperCAmelCase__ : int = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
UpperCAmelCase__ : List[Any] = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
UpperCAmelCase__ : List[Any] = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
UpperCAmelCase__ : Optional[Any] = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
UpperCAmelCase__ : List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
UpperCAmelCase__ : int = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
UpperCAmelCase__ : Union[str, Any] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
UpperCAmelCase__ : Optional[int] = key.replace("""module.last_layer_depth""" , """head.head""" )
UpperCAmelCase__ : Optional[Any] = value
return new_state_dict
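# Intended effect of the renaming above, for example:
#   "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"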
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> Dict:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase__ : Dict = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCAmelCase__ : int = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase__ : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase__ : int = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase__ : int = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase__ : List[Any] = kv_bias[config.hidden_sizes[i] :]
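# The original GLPN checkpoint stores keys and values as one fused `kv`
# projection; the loop above splits the first hidden_sizes[i] rows into the
# key projection and the remaining rows into the value projection.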
def a__ ( ) -> int:
UpperCAmelCase__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase__ : int = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return image
@torch.no_grad()
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=None ) -> Union[str, Any]:
UpperCAmelCase__ : Any = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase__ : Any = GLPNImageProcessor()
# prepare image
UpperCAmelCase__ : List[str] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
UpperCAmelCase__ : Tuple = torch.load(lowerCAmelCase , map_location=torch.device("""cpu""" ) )
# rename keys
UpperCAmelCase__ : Optional[Any] = rename_keys(lowerCAmelCase )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase , lowerCAmelCase )
# create HuggingFace model and load state dict
UpperCAmelCase__ : Union[str, Any] = GLPNForDepthEstimation(lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
model.eval()
# forward pass
UpperCAmelCase__ : Any = model(lowerCAmelCase )
UpperCAmelCase__ : Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase__ : int = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCAmelCase__ : Any = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase , lowerCAmelCase ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase , )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_A = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 166 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__a = logging.get_logger(__name__)
__a = {"vocab_file": "spiece.model"}
__a = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self: Tuple , snake_case: Any , snake_case: Any=False , snake_case: Any=True , snake_case: List[Any]=False , snake_case: Tuple="<s>" , snake_case: List[str]="</s>" , snake_case: Tuple="<unk>" , snake_case: Optional[Any]="<sep>" , snake_case: Any="<pad>" , snake_case: Optional[Any]="<cls>" , snake_case: int="<mask>" , snake_case: List[Any]=["<eop>", "<eod>"] , snake_case: Optional[Dict[str, Any]] = None , **snake_case: int , ) -> None:
snake_case_ :Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
snake_case_ :int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
snake_case_ :str = 3
snake_case_ :List[Any] = do_lower_case
snake_case_ :List[str] = remove_space
snake_case_ :str = keep_accents
snake_case_ :str = vocab_file
snake_case_ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
snake_case_ :List[str] = jieba
snake_case_ :str = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def lowerCAmelCase_ ( self: Dict ) -> Tuple:
return len(self.sp_model )
def lowerCAmelCase_ ( self: int ) -> Any:
snake_case_ :Any = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Dict ) -> Dict:
snake_case_ :Optional[Any] = self.__dict__.copy()
snake_case_ :Dict = None
return state
def __setstate__( self: int , snake_case: str ) -> Tuple:
snake_case_ :Tuple = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ :int = {}
snake_case_ :Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: Any , snake_case: List[Any] ) -> str:
if self.remove_space:
snake_case_ :Dict = """ """.join(inputs.strip().split() )
else:
snake_case_ :Any = inputs
snake_case_ :Optional[Any] = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ :Any = unicodedata.normalize("""NFKD""" , snake_case )
snake_case_ :int = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
snake_case_ :Tuple = outputs.lower()
return outputs
def lowerCAmelCase_ ( self: Optional[int] , snake_case: str ) -> List[str]:
snake_case_ :str = self.preprocess_text(snake_case )
snake_case_ :Optional[Any] = self.sp_model.encode(snake_case , out_type=snake_case )
snake_case_ :int = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ :List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ :Any = cur_pieces[1:]
else:
snake_case_ :Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def lowerCAmelCase_ ( self: int , snake_case: str ) -> Dict:
return self.sp_model.PieceToId(snake_case )
def lowerCAmelCase_ ( self: Dict , snake_case: int ) -> List[str]:
return self.sp_model.IdToPiece(snake_case )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] ) -> List[Any]:
snake_case_ :str = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def lowerCAmelCase_ ( self: int , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :Tuple = [self.sep_token_id]
snake_case_ :Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase_ ( self: int , snake_case: List[int] , snake_case: Optional[List[int]] = None , snake_case: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def lowerCAmelCase_ ( self: int , snake_case: List[int] , snake_case: Optional[List[int]] = None ) -> List[int]:
snake_case_ :List[Any] = [self.sep_token_id]
snake_case_ :str = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
snake_case_ :Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
def lowerCAmelCase_ ( self: List[str] , *snake_case: str , **snake_case: List[Any] ) -> Tuple:
snake_case_ :int = super()._decode(*snake_case , **snake_case )
snake_case_ :Optional[int] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
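
# A minimal, self-contained sketch (illustrative only, not part of the original
# tokenizer) of the whitespace round-trip performed by `self.translator` above
# and undone in `_decode`: spaces and newlines are parked on the placeholder
# glyphs U+2582/U+2583 so SentencePiece never sees raw whitespace.
if __name__ == "__main__":
    _translator = str.maketrans(" \n", "\u2582\u2583")
    _encoded = "今天 天气 真好\n".translate(_translator)
    _decoded = _encoded.replace("\u2582", " ").replace("\u2583", "\n")
    assert _decoded == "今天 天气 真好\n"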
"""simple docstring"""
from __future__ import annotations
__a = 10
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = 1
snake_case_ :List[str] = max(_lowercase )
while placement <= max_digit:
# declare and initialize empty buckets
snake_case_ :list[list] = [[] for _ in range(_lowercase )]
# split list_of_ints between the buckets
for i in list_of_ints:
snake_case_ :Any = int((i / placement) % RADIX )
buckets[tmp].append(_lowercase )
# put each buckets' contents into list_of_ints
snake_case_ :Optional[Any] = 0
for b in range(_lowercase ):
for i in buckets[b]:
snake_case_ :Union[str, Any] = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
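
# Usage sketch (hypothetical input; the sort mutates its argument and returns it):
#     >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     [2, 24, 45, 66, 75, 90, 170, 802]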
if __name__ == "__main__":
import doctest
doctest.testmod()
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion over all cuts: exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant: O(n^2)."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant: O(n^2)."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
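
# A small worked example beyond main() (hypothetical prices): for prices
# [1, 5, 8, 9] and n = 4, the optimum is two pieces of length 2, so
# bottom_up_cut_rod(4, [1, 5, 8, 9]) == 5 + 5 == 10.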
if __name__ == "__main__":
main()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.get_tokenizer()
A : Optional[Any] = self.get_rust_tokenizer()
A : Optional[int] = self.get_image_processor()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : Tuple = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Tuple = self.get_image_processor(do_normalize=lowerCamelCase__ )
A : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_image_processor()
A : str = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = self.prepare_image_inputs()
A : Optional[Any] = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Any = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : int = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : Optional[int] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = """lower newer"""
A : Union[str, Any] = processor(text=lowerCamelCase__, return_tensors="""np""" )
A : str = tokenizer(lowerCamelCase__, return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : List[str] = """lower newer"""
A : Any = self.prepare_image_inputs()
A : Tuple = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : str = """google/owlvit-base-patch32"""
A : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[int] = processor(text=lowerCamelCase__ )
A : Any = 16
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Tuple = """google/owlvit-base-patch32"""
A : Any = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : int = [["""cat""", """nasa badge"""], ["""person"""]]
A : List[Any] = processor(text=lowerCamelCase__ )
A : Dict = 16
A : List[str] = len(lowerCamelCase__ )
A : List[str] = max([len(lowerCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Dict = """google/owlvit-base-patch32"""
A : int = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[Any] = processor(text=lowerCamelCase__ )
A : int = 16
A : Optional[Any] = inputs["""input_ids"""]
A : Optional[int] = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Optional[Any] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : Optional[Any] = self.prepare_image_inputs()
A : List[str] = processor(images=lowerCamelCase__, query_images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Any = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[Any] = processor.batch_decode(lowerCamelCase__ )
A : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
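
# Condensed sketch of the flow exercised above (the model id is real; the image
# input is illustrative):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
#   # -> "input_ids"/"attention_mask" padded to 16 tokens per text query, plus
#   #    "pixel_values" for the image batch.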
"""simple docstring"""
from manim import *
class __A (snake_case__):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->List[str]:
"""simple docstring"""
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
snake_case_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
snake_case_ = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
snake_case_ = Text("""CPU""" , font_size=24 )
snake_case_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
snake_case_ = [mem.copy() for i in range(1 )]
snake_case_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
snake_case_ = Text("""GPU""" , font_size=24 )
snake_case_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase , _lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
snake_case_ = Text("""Model""" , font_size=24 )
snake_case_ = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) , )
snake_case_ = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=2.5 ) , Write(_lowerCAmelCase ) , Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
snake_case_ = []
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(_lowerCAmelCase ):
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
snake_case_ = 0.46 / 4
snake_case_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCAmelCase , buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
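
# To render this scene (assuming the manim community edition CLI is installed;
# the class name Stage1 is a reconstruction, adjust as needed):
#   manim -pql stage_1.py Stage1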
'''Tests for AutoTokenizer: type resolution, registration, and dynamic (remote-code) loading.'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =0
@slow
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_lowerCAmelCase) , 0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
# Check that tokenizer_type ≠ model_type
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2' , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_lowerCAmelCase , 'vocab.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_lowerCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_lowerCAmelCase , 'merges.txt'))
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
with pytest.raises(_lowerCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase =tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_lowerCAmelCase , _lowerCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _lowerCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__lowercase =tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =TOKENIZER_MAPPING.values()
__lowercase =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_lowerCAmelCase) , _lowerCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _lowerCAmelCase)
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_lowerCAmelCase)
__lowercase ='Hello, world. How are you?'
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__lowercase =AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_lowerCAmelCase)
__lowercase =tokenizer.tokenize(_lowerCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =get_tokenizer_config('bert-base-cased')
__lowercase =config.pop('_commit_hash' , _lowerCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_lowerCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__lowercase =get_tokenizer_config(_lowerCAmelCase)
self.assertDictEqual(_lowerCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =get_tokenizer_config(_lowerCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
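        # For reference, get_tokenizer_config amounts to downloading and parsing
        # tokenizer_config.json; a minimal equivalent (hypothetical, with no
        # caching or auth handling):
        #   from huggingface_hub import hf_hub_download
        #   import json
        #   with open(hf_hub_download("bert-base-cased", "tokenizer_config.json")) as f:
        #       cfg = json.load(f)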
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
__lowercase =CustomTokenizer.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self : int):
'''simple docstring'''
try:
AutoConfig.register('custom' , _lowerCAmelCase)
# Can register in two steps
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase):
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
            # We pass through a BERT fast tokenizer here because there is no slow-to-fast
            # converter for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase =BertTokenizerFast.from_pretrained(_lowerCAmelCase)
bert_tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =CustomTokenizerFast.from_pretrained(_lowerCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase):
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCAmelCase)
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = False
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('custom' , _lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase)
AutoTokenizer.register(_lowerCAmelCase , fast_tokenizer_class=_lowerCAmelCase)
# If remote code is not set, the default is to use local
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__lowercase =AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__lowercase =AutoTokenizer.from_pretrained('bert-base')
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase , revision='aaaaaa')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__lowercase =AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
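
# Rough mental model of what the tests above pin down: AutoTokenizer resolves a
# checkpoint via the tokenizer_class recorded in tokenizer_config.json when
# present, falls back to the model config's type through TOKENIZER_MAPPING,
# honors classes added with AutoTokenizer.register, and only executes code
# fetched from the Hub when trust_remote_code=True.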
"""simple docstring"""
import os
def _snake_case ( ) -> List[str]:
'''simple docstring'''
with open(os.path.dirname(_snake_case ) + '/grid.txt' ) as f:
_A = [] # noqa: E741
for _ in range(20 ):
l.append([int(_snake_case ) for x in f.readline().split()] )
_A = 0
# right
for i in range(20 ):
for j in range(17 ):
_A = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
_A = temp
# down
for i in range(17 ):
for j in range(20 ):
_A = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
_A = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
_A = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
_A = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
_A = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
_A = temp
return maximum
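
# Illustration of one scan direction on toy data: the best 4-long window
# product of the row [1, 2, 3, 4, 5] is 2 * 3 * 4 * 5 = 120, found by sliding
# j across the row exactly as the "right" loop does above.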
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save the PyTorch model and its configuration
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
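
# Example invocation (all paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sts-b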
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # Resize to a slightly larger image so the subsequent center crop keeps crop_pct of it.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
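
# Minimal usage sketch (illustrative; the class name follows the reconstruction
# above, and Pillow/numpy must be installed):
#   from PIL import Image
#   processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)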
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Any):
SCREAMING_SNAKE_CASE_: Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE_: Optional[int] = None
SCREAMING_SNAKE_CASE_: str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
SCREAMING_SNAKE_CASE_: List[str] = {}
SCREAMING_SNAKE_CASE_: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
    def vocab_size( self ):
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
    def src_lang( self ) -> str:
return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = [1] * len(self.prefix_tokens)
SCREAMING_SNAKE_CASE_: Optional[int] = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] , **lowerCAmelCase__ : Optional[Any]):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
SCREAMING_SNAKE_CASE_: str = src_lang
SCREAMING_SNAKE_CASE_: Any = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = self.convert_tokens_to_ids(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = tgt_lang_id
return inputs
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Any = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str):
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_: Any = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : Union[str, Any]):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[str]):
SCREAMING_SNAKE_CASE_: Tuple = "".join(lowerCAmelCase__).replace(lowerCAmelCase__ , " ").strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__ , "wb") as fi:
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str = "en_XX" , lowerCAmelCase__ : Optional[List[str]] = None , lowerCAmelCase__ : str = "ro_RO" , **lowerCAmelCase__ : Optional[Any] , ):
SCREAMING_SNAKE_CASE_: str = src_lang
SCREAMING_SNAKE_CASE_: Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.set_src_lang_special_tokens(self.src_lang)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self , src_lang: str ):
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang: str ):
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
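# Hedged sketch (added, not from the upstream file): the spm -> fairseq id shift
# implemented in __init__ above, on toy values. The first "real" spm piece ","
# (spm id 3) maps to fairseq id 3 + fairseq_offset = 4, matching the alignment table.
if __name__ == "__main__":
    fairseq_offset = 1
    toy_spm_ids = {",": 3, ".": 4, "▁": 5}
    print({tok: spm_id + fairseq_offset for tok, spm_id in toy_spm_ids.items()})
    # -> {',': 4, '.': 5, '▁': 6}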
| 127 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ["""YolosFeatureExtractor"""]
lowerCAmelCase : Tuple = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_yolos"""] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
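# Note (added, hedged): the pattern above is the standard transformers lazy-import
# scheme -- optional backends are probed inside try/except OptionalDependencyNotAvailable
# so that missing extras (vision, torch) simply leave their entries out of
# `_import_structure`, and `_LazyModule` defers the actual imports until the first
# attribute access on the module.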
| 127 | 1 |
import math
def fx(x: float, a: float) -> float:
    """Evaluate f(x) = x**2 - a; a root of f is the square root of ``a``."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x**2 - a, i.e. f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring from 2.0."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    """Approximate sqrt(a) with Newton's method: x <- x - f(x) / f'(x)."""
    if a < 0:
        raise ValueError('''math domain error''')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
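# Hedged example (added): Newton's update x <- x - f(x)/f'(x) on f(x) = x**2 - a
# converges quadratically once near the root, e.g.
#     square_root_iterative(2.0)  # ~1.4142135623730951 after a few iterations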
| 51 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1_026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, IG(X)) pairs on WikiText and save them to ``igf_data_file``."""
    set_seed(3 )
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1_026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
    # load pretrained model
    model = load_gpta('gpt2' ).to(device )
    print('computing perplexity on objective set' )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print('perplexity on objective set:' , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the SecondaryLearner that predicts IG(X) from a context."""
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('gpt2' )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1_000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, backpropagating only on contexts the secondary learner keeps."""
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataloader )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print('Test perplexity, step' , global_step , ':' , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
                examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print('Test perplexity, step' , global_step , ':' , real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
    """Collect IGF pairs, train the secondary learner, then fine-tune GPT-2 with IGF."""
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_lowerCAmelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=100 , type=_lowerCAmelCase , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=100 , type=_lowerCAmelCase , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=1_000 , type=_lowerCAmelCase , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=128 , type=_lowerCAmelCase , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_lowerCAmelCase , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_lowerCAmelCase , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=100 , type=_lowerCAmelCase , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=1_026 , type=_lowerCAmelCase , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_lowerCAmelCase , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_lowerCAmelCase , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_lowerCAmelCase , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load('data/IGF_values.jbl' )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained('gpt2' )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1_026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
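# Note (added, hedged): the end-to-end IGF flow above is
#   1) generate_n_pairs  -> collect (context, IG(X)) training pairs,
#   2) training_secondary_learner -> fit the IG(X) predictor,
#   3) finetune -> fine-tune GPT-2, skipping backprop on contexts whose
#      predicted IG(X) falls below the (decaying) threshold.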
| 166 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho( num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ) -> int | None:
'''simple docstring'''
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
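# Hedged worked example (added): 8051 = 83 * 97, so pollard_rho(8051) typically
# returns 83 or 97 (which one depends on the seed/step that first closes a cycle);
# for a prime input such as 97 all attempts fail and None is returned.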
if __name__ == "__main__":
import argparse
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
lowerCAmelCase_ : Tuple = parser.parse_args()
lowerCAmelCase_ : Any = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'{args.num} is probably prime')
else:
lowerCAmelCase_ : Optional[Any] = args.num // divisor
print(F'{args.num} = {divisor} * {quotient}')
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 248 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = torch.device('cpu')
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output( swiftformer_name ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict ):
    '''simple docstring'''
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
        if ".dwconv" in k:
            k_new = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
        if ".Proj." in k:
            k_new = k_new.replace(""".Proj.""" , """.proj.""" )
        if "patch_embed" in k_new:
            k_new = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
        if "network" in k_new:
            ls = k_new.split(""".""" )
            if ls[2].isdigit():
                k_new = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
            else:
                k_new = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
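# Hedged example (added), tracing create_rename_keys on one illustrative key:
#   "network.0.0.dwconv.weight"
#   -> ".dwconv" rewritten to ".depth_wise_conv"
#   -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"
# (the exact keys depend on the original checkpoint).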
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__UpperCAmelCase : Optional[int] = 1_0_0_0
__UpperCAmelCase : Optional[int] = """huggingface/label-files"""
__UpperCAmelCase : str = """imagenet-1k-id2label.json"""
__UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
__UpperCAmelCase : Any = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Optional[Any] = idalabel
__UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__UpperCAmelCase : Tuple = [3, 3, 6, 4]
__UpperCAmelCase : List[Any] = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
__UpperCAmelCase : Optional[int] = [3, 3, 9, 6]
__UpperCAmelCase : Tuple = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
__UpperCAmelCase : Dict = [4, 3, 1_0, 5]
__UpperCAmelCase : Optional[Any] = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
__UpperCAmelCase : str = [4, 4, 1_2, 6]
__UpperCAmelCase : str = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" , check_hash=_UpperCamelCase )
else:
__UpperCAmelCase : Dict = torch.load(_UpperCamelCase , map_location="""cpu""" )
__UpperCAmelCase : str = checkpoint
__UpperCAmelCase : Union[str, Any] = create_rename_keys(_UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
__UpperCAmelCase : int = SwiftFormerForImageClassification(_UpperCamelCase ).eval()
hf_model.load_state_dict(_UpperCamelCase )
# prepare test inputs
__UpperCAmelCase : List[Any] = prepare_img()
__UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
__UpperCAmelCase : Optional[Any] = processor(images=_UpperCamelCase , return_tensors="""pt""" )
# compare outputs from both models
__UpperCAmelCase : Tuple = get_expected_output(_UpperCamelCase )
__UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , _UpperCamelCase , atol=1E-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
UpperCAmelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 115 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
UpperCAmelCase : Any = get_logger(__name__)
class _PatchedModuleObj :
"""simple docstring"""
    def __init__( self , module , attrs=None ):
        '''simple docstring'''
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("""__""" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule :
    """simple docstring"""
    _active_patches = []
    def __init__( self , obj , target: str , new , attrs=None ):
        '''simple docstring'''
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(""".""" )[0]
        self.original = {}
        self.attrs = attrs or []
def __enter__( self : Dict ):
'''simple docstring'''
*__UpperCAmelCase ,__UpperCAmelCase : str = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCamelCase ) ):
try:
__UpperCAmelCase : List[Any] = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__UpperCAmelCase : List[Any] = getattr(self.obj , UpperCamelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCamelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__UpperCAmelCase : Tuple = obj_attr
# patch at top level
setattr(self.obj , UpperCamelCase , _PatchedModuleObj(UpperCamelCase , attrs=self.attrs ) )
__UpperCAmelCase : int = getattr(self.obj , UpperCamelCase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCamelCase , UpperCamelCase , _PatchedModuleObj(getattr(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , attrs=self.attrs ) )
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , UpperCamelCase )
# finally set the target attribute
setattr(UpperCamelCase , UpperCamelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__UpperCAmelCase : int = getattr(import_module(""".""".join(UpperCamelCase ) ) , UpperCamelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCamelCase ) is attr_value:
__UpperCAmelCase : Union[str, Any] = getattr(self.obj , UpperCamelCase )
setattr(self.obj , UpperCamelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__UpperCAmelCase : str = globals()["""__builtins__"""][target_attr]
setattr(self.obj , UpperCamelCase , self.new )
else:
raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self : str , *UpperCamelCase : Optional[int] ):
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , UpperCamelCase , self.original.pop(UpperCamelCase ) )
    def start( self ):
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
    def stop( self ):
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
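# Hedged usage sketch (added): `patch_submodule` works as a context manager or via
# start()/stop(); `some_module` and `mocked_join` below are placeholders, not names
# from this file:
#
#     with patch_submodule(some_module, "os.path.join", mocked_join):
#         ...  # some_module sees the mock, even through aliased imports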
| 115 | 1 |
'''simple docstring'''
from manim import *
class A ( Scene ):
    def construct( self ):
"""simple docstring"""
A : str = Rectangle(height=0.5 , width=0.5 )
A : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A : int = [mem.copy() for i in range(6 )]
A : List[Any] = [mem.copy() for i in range(6 )]
A : Dict = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
A : List[str] = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
A : int = VGroup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
A : int = Text('''CPU''' , font_size=24 )
A : List[str] = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE )
A : Union[str, Any] = [mem.copy() for i in range(1 )]
A : Dict = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
A : Union[str, Any] = Text('''GPU''' , font_size=24 )
A : Optional[Any] = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
gpu.align_to(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE )
A : int = [mem.copy() for i in range(6 )]
A : int = VGroup(*SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0 )
A : Tuple = Text('''Model''' , font_size=24 )
A : Dict = Group(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).arrange(SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , Create(SCREAMING_SNAKE_CASE , run_time=1 ) , )
A : Union[str, Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
A : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A : Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE ) , Write(SCREAMING_SNAKE_CASE ) )
self.add(SCREAMING_SNAKE_CASE )
A : Tuple = []
A : List[str] = []
A : Tuple = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE ):
A : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A : int = 0.46 / 4
A : Tuple = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE )
self.play(*SCREAMING_SNAKE_CASE )
self.wait()
| 356 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> Dict:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
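# Hedged usage sketch (added): given a trained `unet` and a compatible `scheduler`
# (neither is provided here), the pipeline class above is typically used as:
#
#     pipe = A(unet, scheduler)
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]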
| 311 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase( nn.Module ):
'''simple docstring'''
    def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        '''simple docstring'''
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
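# Shape note (added, hedged): with condition_lengths = [77, 257], the expected
# encoder_hidden_states is (batch, 77 + 257, features). Per
# transformer_index_for_condition = [1, 0], transformers[1] reads the first 77
# condition tokens and transformers[0] the remaining 257; the two residuals are
# blended with mix_ratio = 0.5 before the input states are added back.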
| 64 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path('''tests/test_configs''' )
@classmethod
def __lowercase ( cls : int ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_a ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_a ), self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy() )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] ,return_stdout=_a )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
| 271 | 0 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[str] = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "efficientnet"
    def __init__( self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
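# Hedged example (added): with the defaults above, a config instance derives
#   num_hidden_layers = sum(num_block_repeats) * 4 = (1+2+2+3+3+4+1) * 4 = 64,
# and the ONNX config exposes a single NCHW "pixel_values" input with an
# absolute validation tolerance of 1e-5.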
| 370 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
A : Any = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__, lowerCamelCase__ )
    def get_tokenizer( self, **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_rust_tokenizer( self, **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs )
    def get_image_processor( self, **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
A : Optional[int] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
A : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
A : List[Any] = self.get_tokenizer()
A : Dict = self.get_rust_tokenizer()
A : List[Any] = self.get_image_processor()
A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = self.get_tokenizer(cls_token="""(CLS)""", sep_token="""(SEP)""" )
A : Optional[int] = self.get_image_processor(do_normalize=lowerCamelCase__ )
A : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token="""(CLS)""", sep_token="""(SEP)""", do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Tuple = self.get_image_processor()
A : List[str] = self.get_tokenizer()
A : List[str] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : Union[str, Any] = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : str = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : Tuple = self.get_tokenizer()
A : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = """Alexandra,T-shirt的价格是15便士。"""
A : Optional[Any] = processor(text=lowerCamelCase__ )
A : int = tokenizer(lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def _lowerCAmelCase ( self ):
A : Dict = self.get_image_processor()
A : List[str] = self.get_tokenizer()
A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : str = """Alexandra,T-shirt的价格是15便士。"""
A : Dict = self.prepare_image_inputs()
A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.get_image_processor()
A : List[str] = self.get_tokenizer()
A : List[str] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : List[Any] = processor.batch_decode(lowerCamelCase__ )
A : str = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : List[str] = self.get_image_processor()
A : List[str] = self.get_tokenizer()
A : Any = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : List[Any] = """Alexandra,T-shirt的价格是15便士。"""
A : Optional[Any] = self.prepare_image_inputs()
A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 115 | 0 |
_SCREAMING_SNAKE_CASE : Dict = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
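# The try/except pattern above repeats for every optional backend below: probe
# the extra dependency, fall back to the auto-generated dummy placeholder
# objects when it is missing, and import the real implementations only when
# the probe succeeds.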
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 127 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(__snake_case ) , torch_builtin(__snake_case ) ) )
self.assertFalse(torch.allclose(gelu_python(__snake_case ) , gelu_new(__snake_case ) ) )
def a_ ( self ):
snake_case = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case = get_activation('''gelu''' )
snake_case = get_activation('''gelu_10''' )
snake_case = torch_builtin(__snake_case )
snake_case = geluaa(__snake_case )
snake_case = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__snake_case ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
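        # Note: "gelu_10" is GELU with activations clipped at 10.0; the checks
        # above verify the maximum equals the clip value and that, below the
        # threshold, the clipped and unclipped curves agree elementwise.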
def a_ ( self ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(__snake_case ):
get_activation('''bogus''' )
with self.assertRaises(__snake_case ):
get_activation(__snake_case )
def a_ ( self ):
snake_case = get_activation('''gelu''' )
snake_case = 1
snake_case = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__snake_case ):
snake_case = acta.a
| 127 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def lowercase (inductance : float , frequency : float , reactance : float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
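# Worked sketch (illustrative values): for a coil with L = 0.000_254 H driven
# at f = 10_000 Hz, X_L = 2 * pi * f * L ≈ 15.96 ohm, so
#   lowercase(inductance=0.000_254, frequency=10_000, reactance=0)
# returns {"reactance": 15.959...}.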
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__UpperCamelCase = models.Sequential()
# Step 1 - Convolution
    # Here 64, 64 is the height & width of the dataset images and 3 is the
    # number of RGB channels; (3, 3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='''relu'''))
classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
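    # A single sigmoid unit outputs one probability for the positive class,
    # which pairs with the binary_crossentropy loss in the compile step below.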
# Compiling the CNN
classifier.compile(
optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__UpperCamelCase = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__UpperCamelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
__UpperCamelCase = train_datagen.flow_from_directory(
'''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
__UpperCamelCase = test_datagen.flow_from_directory(
'''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary'''
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('''cnn.h5''')
# Part 3 - Making new predictions
__UpperCamelCase = tf.keras.preprocessing.image.load_img(
'''dataset/single_prediction/image.png''', target_size=(64, 64)
)
__UpperCamelCase = tf.keras.preprocessing.image.img_to_array(test_image)
__UpperCamelCase = np.expand_dims(test_image, axis=0)
__UpperCamelCase = classifier.predict(test_image)
    # training_set.class_indices
    # predict() returns a sigmoid probability in (0, 1), so threshold at 0.5
    # rather than comparing against exact 0/1 values.
    if result[0][0] < 0.5:
        __UpperCamelCase = '''Normal'''
    else:
        __UpperCamelCase = '''Abnormality detected'''
| 38 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
a : Optional[Any] = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["""ViTFeatureExtractor"""]
a : str = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
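# Note: the TYPE_CHECKING branch above gives static type checkers concrete
# imports, while at runtime _LazyModule defers every heavy import until first
# attribute access, keeping top-level package import cheap.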
| 114 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A__(unittest.TestCase ):
"""simple docstring"""
    _A : List[str] = StableDiffusionLDM3DPipeline
_A : int = TEXT_TO_IMAGE_PARAMS
_A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_A : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
        a_ : Tuple = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a_ : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
a_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a_ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ : Tuple = CLIPTextModel(_lowercase )
a_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Any:
if str(_lowercase ).startswith("""mps""" ):
a_ : Optional[Any] = torch.manual_seed(_lowercase )
else:
a_ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Any = self.get_dummy_components()
        a_ : List[str] = StableDiffusionLDM3DPipeline(**_lowercase )
a_ : Union[str, Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : int = self.get_dummy_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : Tuple = output.rgb, output.depth
a_ : Union[str, Any] = rgb[0, -3:, -3:, -1]
a_ : Any = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[Any] = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
a_ : int = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
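        # Unlike standard Stable Diffusion pipelines, LDM3D returns a joint
        # output with both an RGB image and a depth map, hence the paired
        # slice checks above.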
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = self.get_dummy_components()
        a_ : Optional[int] = StableDiffusionLDM3DPipeline(**_lowercase )
a_ : Optional[Any] = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs["""prompt"""]]
# forward
a_ : Optional[int] = ldmad_pipe(**_lowercase )
a_ , a_ : Any = output.rgb, output.depth
a_ : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth_slice_a[0, -3:, -1]
a_ : Dict = self.get_dummy_inputs(_lowercase )
a_ : List[str] = 3 * [inputs.pop("""prompt""" )]
a_ : List[Any] = ldmad_pipe.tokenizer(
_lowercase , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_lowercase , return_tensors="""pt""" , )
a_ : int = text_inputs["""input_ids"""].to(_lowercase )
a_ : Any = ldmad_pipe.text_encoder(_lowercase )[0]
a_ : Dict = prompt_embeds
# forward
a_ : int = ldmad_pipe(**_lowercase )
a_ , a_ : Optional[int] = output.rgb, output.depth
a_ : List[str] = rgb_slice_a[0, -3:, -3:, -1]
a_ : Tuple = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def UpperCamelCase__ ( self ) -> Dict:
a_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : Any = PNDMScheduler(skip_prk_steps=_lowercase )
        a_ : List[str] = StableDiffusionLDM3DPipeline(**_lowercase )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[Any] = self.get_dummy_inputs(_lowercase )
a_ : int = """french fries"""
a_ : Any = ldmad_pipe(**_lowercase , negative_prompt=_lowercase )
a_ , a_ : Optional[Any] = output.rgb, output.depth
a_ : Tuple = rgb[0, -3:, -3:, -1]
a_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
a_ : Optional[int] = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
a_ : Union[str, Any] = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> List[str]:
a_ : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Dict = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Tuple = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Any:
        a_ : Tuple = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" )
a_ : str = ldmad_pipe.to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Dict = self.get_inputs(_lowercase )
a_ : Optional[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : int = output.rgb, output.depth
a_ : str = rgb[0, -3:, -3:, -1].flatten()
a_ : Tuple = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
a_ : Optional[int] = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
a_ : Optional[int] = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self , _lowercase , _lowercase="cpu" , _lowercase=torch.floataa , _lowercase=0 ) -> str:
a_ : List[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Tuple = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
a_ : Any = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
a_ : Dict = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
        a_ : Tuple = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : Union[str, Any] = ldmad_pipe(**_lowercase )
a_ , a_ : str = output.rgb, output.depth
a_ : List[str] = 0.4_9_5_5_8_6
a_ : int = 0.3_3_7_9_5_5_1_5
a_ : int = 1_1_2.4_8_5_1_8
a_ : Optional[int] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase__ ( self ) -> Optional[int]:
        a_ : Optional[Any] = StableDiffusionLDM3DPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(_lowercase )
ldmad_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[str] = self.get_inputs(_lowercase )
a_ : List[Any] = ldmad_pipe(**_lowercase )
a_ , a_ : List[Any] = output.rgb, output.depth
a_ : int = 0.4_1_9_4_1_2_7
a_ : List[str] = 0.3_5_3_7_5_5_8_6
a_ : Optional[int] = 0.5_6_3_8_5_0_2
a_ : str = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 248 | 0 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowerCamelCase : dict ):
lowercase_ :int = set()
# edges = list of graph's edges
lowercase_ :List[Any] = get_edges(__lowerCamelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
lowercase_ , lowercase_ :Optional[int] = edges.pop()
chosen_vertices.add(__lowerCamelCase )
chosen_vertices.add(__lowerCamelCase )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__lowerCamelCase )
return chosen_vertices
def UpperCAmelCase_ ( __lowerCamelCase : dict ):
lowercase_ :Union[str, Any] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 147 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ ( _lowerCAmelCase ):
def __init__( self : Any , lowercase : int , lowercase : Union[str, Any]=13 , lowercase : List[str]=7 , lowercase : List[str]=True , lowercase : int=True , lowercase : Tuple=True , lowercase : int=True , lowercase : List[Any]=99 , lowercase : Optional[int]=32 , lowercase : Dict=5 , lowercase : Optional[int]=4 , lowercase : List[str]=37 , lowercase : Tuple="gelu" , lowercase : List[Any]=0.1 , lowercase : Tuple=0.1 , lowercase : List[str]=512 , lowercase : str=16 , lowercase : Tuple=2 , lowercase : List[Any]=0.02 , lowercase : Dict=False , lowercase : Dict=True , lowercase : int="None" , lowercase : Optional[Any]=3 , lowercase : Dict=4 , lowercase : List[Any]=None , ):
"""simple docstring"""
lowercase_ :int = parent
lowercase_ :str = batch_size
lowercase_ :Tuple = seq_length
lowercase_ :Union[str, Any] = is_training
lowercase_ :Dict = use_input_mask
lowercase_ :Any = use_token_type_ids
lowercase_ :Tuple = use_labels
lowercase_ :Dict = vocab_size
lowercase_ :Tuple = hidden_size
lowercase_ :Union[str, Any] = num_hidden_layers
lowercase_ :int = num_attention_heads
lowercase_ :List[Any] = intermediate_size
lowercase_ :Tuple = hidden_act
lowercase_ :str = hidden_dropout_prob
lowercase_ :Any = attention_probs_dropout_prob
lowercase_ :List[Any] = max_position_embeddings
lowercase_ :Union[str, Any] = type_vocab_size
lowercase_ :Union[str, Any] = type_sequence_label_size
lowercase_ :Any = initializer_range
lowercase_ :List[Any] = num_labels
lowercase_ :str = num_choices
lowercase_ :Optional[Any] = relative_attention
lowercase_ :Tuple = position_biased_input
lowercase_ :Union[str, Any] = pos_att_type
lowercase_ :Tuple = scope
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :Union[str, Any] = None
if self.use_input_mask:
lowercase_ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase_ :List[Any] = None
if self.use_token_type_ids:
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ :str = None
lowercase_ :Union[str, Any] = None
lowercase_ :List[str] = None
if self.use_labels:
lowercase_ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ :str = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ :Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :Dict = self.get_config()
lowercase_ :Optional[Any] = 300
return config
def lowercase__ ( self : Optional[Any] , lowercase : Dict ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase__ ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : List[Any] , lowercase : Tuple , lowercase : str , lowercase : Optional[Any] , lowercase : Optional[int] ):
"""simple docstring"""
lowercase_ :str = DebertaModel(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )[0]
lowercase_ :Union[str, Any] = model(lowercase , token_type_ids=lowercase )[0]
lowercase_ :Dict = model(lowercase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase__ ( self : Dict , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : Tuple , lowercase : Dict , lowercase : Dict , lowercase : str , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :Dict = DebertaForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : List[Any] , lowercase : int , lowercase : Dict ):
"""simple docstring"""
lowercase_ :Dict = self.num_labels
lowercase_ :int = DebertaForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Union[str, Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowercase )
def lowercase__ ( self : List[Any] , lowercase : List[str] , lowercase : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Union[str, Any] , lowercase : Dict , lowercase : int ):
"""simple docstring"""
lowercase_ :List[str] = self.num_labels
lowercase_ :Optional[int] = DebertaForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :Dict = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , lowercase : Tuple , lowercase : Any , lowercase : List[Any] , lowercase : List[Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Any = DebertaForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
lowercase_ :List[Any] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__A = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__A = True
__A = False
__A = False
__A = False
__A = False
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :List[Any] = DebertaModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowercase )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowercase )
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowercase )
def lowercase__ ( self : int ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowercase )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowercase )
@slow
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Tuple = DebertaModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def lowercase__ ( self : Dict ):
"""simple docstring"""
pass
@slow
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = DebertaModel.from_pretrained("microsoft/deberta-base" )
lowercase_ :Dict = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowercase_ :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ :Optional[int] = model(lowercase , attention_mask=lowercase )[0]
# compare the actual values for a slice.
lowercase_ :List[Any] = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
| 147 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class __UpperCAmelCase ( _lowerCamelCase ):
# to overwrite at feature extractactor specific tests
__lowercase = None
__lowercase = None
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , 'feature_size' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'sampling_rate' ) )
self.assertTrue(hasattr(lowerCAmelCase_ , 'padding_value' ) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , processed_features[input_name] ) ) )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase_ )
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
_snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def lowerCamelCase ( self , lowerCAmelCase_=False ):
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase_ ):
_snake_case = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1E-3 ):
return False
return True
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = self.feat_extract_tester.seq_length_diff
_snake_case = self.feat_extract_tester.max_seq_length + pad_diff
_snake_case = self.feat_extract_tester.min_seq_length
_snake_case = self.feat_extract_tester.batch_size
_snake_case = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )
_snake_case = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='max_length' )[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=lowerCAmelCase_ , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_snake_case = feat_extract.pad(lowerCAmelCase_ , pad_to_multiple_of=10 )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , pad_to_multiple_of=10 )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase_ , return_tensors='np' , )
_snake_case = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
_snake_case = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_snake_case = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def lowerCamelCase ( self , lowerCAmelCase_=False ):
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase_ ):
_snake_case = len(input[0] )
for input_slice in input[1:]:
if len(lowerCAmelCase_ ) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ):
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
if not np.allclose(np.asarray(lowerCAmelCase_ ) , np.asarray(lowerCAmelCase_ ) , atol=1E-3 ):
return False
return True
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase_ )
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
_snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to smallest with np
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
# truncate to middle
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ , return_tensors='np' , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=lowerCAmelCase_ )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
_snake_case = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(_inputs_are_equal(lowerCAmelCase_ , lowerCAmelCase_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='longest' , truncation=lowerCAmelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='longest' , truncation=lowerCAmelCase_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase_ ):
feat_extract.pad(lowerCAmelCase_ , padding='max_length' , truncation=lowerCAmelCase_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_snake_case = 12
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCAmelCase_ , )
_snake_case = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_snake_case = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_snake_case = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase_ ) )
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase_ ) )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase_ )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
@require_tf
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feature_extraction_class(**self.feat_extract_dict )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )[input_name]
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_dict
_snake_case = True
_snake_case = self.feature_extraction_class(**lowerCAmelCase_ )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = [len(lowerCAmelCase_ ) for x in speech_inputs]
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = feat_extract.pad(lowerCAmelCase_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.feat_extract_dict
_snake_case = True
_snake_case = self.feature_extraction_class(**lowerCAmelCase_ )
_snake_case = self.feat_extract_tester.prepare_inputs_for_common()
_snake_case = [len(lowerCAmelCase_ ) for x in speech_inputs]
_snake_case = feat_extract.model_input_names[0]
_snake_case = BatchFeature({input_name: speech_inputs} )
_snake_case = min(lowerCAmelCase_ )
_snake_case = feat_extract.pad(
lowerCAmelCase_ , padding='max_length' , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='np' )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 42 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
    UpperCAmelCase : Dict = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
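
# As a quick illustration of clean_model_doc_toc, here is a sketch on a tiny hand-made toc
# fragment (the entries are invented for the example):
toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate entry, same title -> merged
]
print(clean_model_doc_toc(toc))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]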
| 311 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target short-edge size from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
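
# A minimal usage sketch for ResizeShortestEdge with a made-up uint8 image; no detectron2-style
# config is needed for this piece. With a fixed 480px short edge and a 600x900 input, the short
# edge becomes 480 and the long edge scales to 720 to keep the aspect ratio.
resize = ResizeShortestEdge([480, 480], max_size=800)
dummy = (np.random.rand(600, 900, 3) * 255).astype(np.uint8)
resized = resize([dummy])[0]
print(resized.shape)  # (480, 720, 3)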
| 347 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
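
# The structure above keeps `import transformers` cheap: _import_structure only maps submodule
# names to symbol names, and the heavy imports run on first attribute access via _LazyModule.
# A minimal sketch of the same idea using the standard library (illustrative stand-in, not the
# actual transformers implementation; it assumes the module it replaces is a package):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol name -> submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        # Import the defining submodule lazily, then pull the symbol off it.
        module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(module, attr)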
| 347 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
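
# The test_jit_compilation pattern above -- run the same function under jax.jit and again inside
# jax.disable_jit(), then compare outputs -- is a handy sanity check outside tests too. A
# self-contained sketch:
import jax
import jax.numpy as jnp


@jax.jit
def affine(x):
    return 2.0 * x + 1.0


x = jnp.arange(4.0)
jitted = affine(x)
with jax.disable_jit():
    eager = affine(x)

assert jitted.shape == eager.shape and jnp.allclose(jitted, eager)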
| 61 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
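
# A sketch of how these pieces fit together at training time; the tokenizer checkpoint and data
# path are placeholders, and max_seq_length should leave room for the image embeddings the
# downstream model prepends:
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
dataset = JsonlDataset(
    "path/to/train.jsonl",
    tokenizer,
    get_image_transforms(),
    get_mmimdb_labels(),
    max_seq_length=80,
)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
text, mask, img, img_start, img_end, labels = next(iter(loader))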
| 115 | 0 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 368 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that receives a key, or falls back to the default key = 0."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt 'content' with 'key'; returns the encrypted text as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt 'content' with 'key'; returns the decrypted text as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 187 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
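
# The cluster bookkeeping in call() implements the usual adaptive-softmax factorization: for a
# word in tail cluster i, log p(word) = log p(cluster_i | h) + log p(word | cluster_i, h). A tiny
# NumPy illustration of that composition (shapes and probabilities are made up):
import numpy as np

head_logprob = np.log(np.array([0.5, 0.2, 0.2, 0.1]))  # 3 shortlist words + 1 tail-cluster slot
tail_logprob = np.log(np.array([0.7, 0.3]))            # 2 words inside the tail cluster

# p(tail word) = p(cluster) * p(word | cluster), i.e. a sum in log space
full_tail_logprob = head_logprob[3] + tail_logprob
full_logprob = np.concatenate([head_logprob[:3], full_tail_logprob])
assert np.isclose(np.exp(full_logprob).sum(), 1.0)  # still a proper distribution over all 5 words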
| 176 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 38 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
    line.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
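
# The FileLock dance above is the canonical way for distributed workers to share one on-disk
# cache: the first process to take the lock builds the cache, the rest block and then load it.
# A stripped-down sketch of the pattern:
import os

import torch
from filelock import FileLock


def load_or_build(cache_file, build_fn, overwrite=False):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file) and not overwrite:
            return torch.load(cache_file)
        data = build_fn()
        torch.save(data, cache_file)
        return data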
| 88 |
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 88 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Return sinusoidal position/timestep embeddings of shape (len(timesteps), embedding_dim)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
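
# A quick contract check for get_sinusoidal_embeddings: 1-D timesteps in, (batch, embedding_dim)
# out, with sines in the first half and cosines in the second half by default:
timesteps = jnp.array([0, 10, 500])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
assert emb.shape == (3, 32)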
| 147 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def lowerCAmelCase_ (lowerCAmelCase__: Any , lowerCAmelCase__: int , lowerCAmelCase__: Optional[int] ):
"""simple docstring"""
UpperCAmelCase_: int = hf_hub_url(repo_id=lowerCAmelCase__ , path=lowerCAmelCase__ , revision=lowerCAmelCase__ )
assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(lowerCAmelCase__ )}'
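
# Concretely, the parametrization above pins down that blanks in file names come back
# percent-encoded. For instance, with the default "main" revision:
url = hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None)
assert url == "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"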
| 147 | 1 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings into a format usable by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
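
# A runnable shape walkthrough for the forward pass above; time_embed_dim and
# cross_attention_dim are hypothetical decoder widths chosen for the example:
model = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,
    cross_attention_dim=1280,
)
hidden_states, time_embeddings = model(
    image_embeddings=torch.randn(2, 768),
    prompt_embeds=torch.randn(2, 768),
    text_encoder_hidden_states=torch.randn(2, 77, 768),
    do_classifier_free_guidance=False,
)
print(hidden_states.shape)    # torch.Size([2, 81, 1280]): 4 extra tokens prepended to 77 text states
print(time_embeddings.shape)  # torch.Size([2, 1536])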
| 105 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 105 | 1 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
'''simple docstring'''
    def __init__( self , short_edge_length , max_size=sys.maxsize ):
        """simple docstring"""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__( self , imgs ):
        """simple docstring"""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class Preprocess:
'''simple docstring'''
    def __init__( self , cfg ):
        """simple docstring"""
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad( self , images ):
        """simple docstring"""
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ):
        """simple docstring"""
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box( tensor , box_size ):
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
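A quick sanity check of the two box helpers above (a minimal sketch; the box values are made up purely for illustration):

# Hypothetical check: clip a box that sticks out of an 80 (h) x 100 (w) image,
# then rescale it with a per-image (scale_y, scale_x) pair as Preprocess produces.
import torch

boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]] )  # x1, y1, x2, y2
_clip_box(boxes , (80, 100) )
assert boxes.tolist() == [[0.0, 10.0, 100.0, 80.0]]

scales_yx = torch.tensor([[0.5, 2.0]] )  # halve y, double x
print(_scale_box(boxes , scales_yx ) )   # x coords doubled, y coords halved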
| 347 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , rotary_dim=4 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase):
'''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxGPTJModelTester(self )

    def test_use_cache_forward( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )

    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ):
        """simple docstring"""
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
        inputs = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname )
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True )
                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
                        self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname )
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True )
                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
                    self.assertEqual(
                        len(fx_outputs ) , len(pt_outputs_loaded ) , "Output lengths differ between Flax and PyTorch" )
                    for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
                        self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
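The cache-consistency checks above all reduce to one numeric comparison; here is that pattern in isolation (a minimal sketch with made-up logits, purely for illustration):

import numpy as np

outputs_full = np.array([[0.11, 0.52, -0.33]] )      # logits from a full forward pass
outputs_cached = np.array([[0.11, 0.52, -0.3295]] )  # logits from incremental decoding
diff = np.max(np.abs(outputs_cached - outputs_full ) )
assert diff < 1e-3, f"Max diff is {diff}"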
| 347 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        '''simple docstring'''
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )

    def test_config( self ):
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        return
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )

    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )

    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip(reason="FocalNet does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking" )
    def test_feed_forward_chunking( self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
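The shape assertions in create_and_check_model follow directly from the patch-merging arithmetic; a standalone check using the tester defaults from the constructor above (a minimal sketch):

image_size, patch_size, embed_dim = 32, 2, 16
depths = [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))  # 256 // 16 = 16
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))  # 16 * 4 = 64
print(expected_seq_len, expected_dim)  # 16 64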
| 297 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class GPT2TokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop("add_bos_token" , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
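A short usage sketch for the fast tokenizer above (requires network access to the "gpt2" checkpoint; the point illustrated is the add_prefix_space behavior):

from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2" , add_prefix_space=True )
# With add_prefix_space=True, pretokenized input is accepted:
enc = tok(["Hello", "world"] , is_split_into_words=True )
print(enc.input_ids )  # each word is tokenized with a leading space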
| 297 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "van"

    def __init__( self , image_size=224 , num_channels=3 , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , hidden_sizes=[64, 128, 320, 512] , depths=[3, 3, 12, 3] , mlp_ratios=[8, 8, 4, 4] , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-6 , layer_scale_init_value=1E-2 , drop_path_rate=0.0 , dropout_rate=0.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
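A minimal instantiation sketch for the configuration above (defaults only; no pretrained weights are downloaded):

config = VanConfig()
print(config.model_type , config.hidden_sizes )  # van [64, 128, 320, 512]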
| 195 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    attention_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
    activation_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
    hidden_dropout: Optional[float] = field(
        default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
        } , )
    feat_proj_dropout: Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
    mask_time_prob: Optional[float] = field(
        default=0.05 , metadata={
            '''help''': (
                '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
                '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
                '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
            )
        } , )
    layerdrop: Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_split_name: Optional[str] = field(
        default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_val_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of validation examples to this '''
                '''value if set.'''
            )
        } , )
    chars_to_ignore: List[str] = list_field(
        default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class DataCollatorCTCWithPadding:
    '''simple docstring'''

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__( self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="pt" , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
        batch["labels"] = labels
        return batch
class CTCTrainer( Trainer ):
'''simple docstring'''
    def training_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        """simple docstring"""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s" , training_args )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice" , data_args.dataset_config_name , split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset("common_voice" , data_args.dataset_config_name , split="test" )

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore )}]"

    def remove_special_characters(batch ):
        batch["text"] = re.sub(chars_to_ignore_regex , "" , batch["sentence"] ).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters , remove_columns=["sentence"] )
    eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=["sentence"] )

    def extract_all_chars(batch ):
        all_text = " ".join(batch["text"] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    vocab_test = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )

    vocab_list = list(set(vocab_train["vocab"][0] ) | set(vocab_test["vocab"][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict )
    vocab_dict["[PAD]"] = len(vocab_dict )

    with open("vocab.json" , "w" ) as vocab_file:
        json.dump(vocab_dict , vocab_file )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json" , unk_token="[UNK]" , pad_token="[PAD]" , word_delimiter_token="|" , )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="mean" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )

    resampler = torchaudio.transforms.Resample(48000 , 16000 )

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array, sampling_rate = torchaudio.load(batch["path"] )
        batch["speech"] = resampler(speech_array ).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )

    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"] ) ) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"] , text=batch["target_text"] , sampling_rate=batch["sampling_rate"][0] )
        batch.update(processed_batch )
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )

    # Metric
    wer_metric = datasets.load_metric("wer" )

    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank ):
            processor.save_pretrained(training_args.output_dir )

        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )

        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_val_samples , len(eval_dataset ) )

        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )

    return results
if __name__ == "__main__":
main()
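The -100 masking applied by the data collator above is worth seeing on its own; a minimal standalone sketch with a fake padded label batch (values are illustrative only):

import torch

label_ids = torch.tensor([[5, 9, 2, 0, 0]] )       # 0 plays the role of the tokenizer's pad id
attention_mask = torch.tensor([[1, 1, 1, 0, 0]] )
labels = label_ids.masked_fill(attention_mask.ne(1 ) , -100 )
print(labels )  # tensor([[   5,    9,    2, -100, -100]])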
| 187 | 0 |
'''simple docstring'''
def stooge_sort( arr ):
    '''simple docstring'''
    stooge(arr , 0 , len(arr ) - 1 )
    return arr
def stooge( arr , i , h ):
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr , i , h - t )
        # Recursively sort the last 2/3 of the elements
        stooge(arr , i + t , h )
        # Recursively sort the first 2/3 of the elements again
        stooge(arr , i , h - t )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
| 242 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig( PretrainedConfig ):
    model_type = "glpn"

    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 242 | 1 |