"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in so the `Image` name resolves when vision extras are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
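# Minimal end-user sketch of what these tests exercise (checkpoint name is a
# real Hub id; exact scores depend on the model and image):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.9982, "label": "remote", "box": {...}}, ...]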
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back into a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
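# Round-trip sanity check (an added sketch, not part of the original module):
if __name__ == "__main__":
    assert base85_decode(base85_encode("hello world")) == "hello world"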
"""simple docstring"""
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler style)."""
    result = split_and_add(factorial(num))
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
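# Worked example (added sketch): 10! == 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.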
| 16 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # keep the backend normalizer in sync with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
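# Quick usage sketch (downloads from the Hub; the ids shown are the standard
# bert-base-uncased ids for "[CLS] hello world [SEP]"):
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tok("hello world")["input_ids"]  # [101, 7592, 2088, 102]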
"""simple docstring"""
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant update: step from x_n1 along the secant line through (x_n, x_n1)
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example function: x^3 - 2x - 5, with a real root near x = 2.0946."""
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
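# Another quick use (a sketch with assumed inputs): the same routine recovers
# sqrt(2) as the root of x**2 - 2 when started from the bracket (1, 2):
#   intersection(lambda x: x * x - 2, 1, 2)  # ~= 1.41421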
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
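# Manual launch sketch (the file name is an assumption; the test relaunches
# itself the same way via execute_subprocess_async):
#   torchrun --nproc_per_node=2 test_multigpu.py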
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # fall back to plain tokenizer behavior when no passages are given
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        # score every candidate span, then keep the best non-overlapping ones
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
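# Usage sketch mirroring the documented DPR reader flow (checkpoint names are
# real Hub ids; DPRReader lives in the companion modeling module):
#   from transformers import DPRReader, DPRReaderTokenizer
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions=["What is love?"], titles=["Haddaway"],
#                       texts=["'What Is Love' is a song recorded by Haddaway"],
#                       return_tensors="pt")
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)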
"""simple docstring"""
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build every permutation of `sequence` with depth-first backtracking."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
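# Cross-check sketch using the standard library; both enumerate the same n!
# orderings of the input:
#   from itertools import permutations
#   for p in permutations([3, 1, 2, 4]):
#       print(list(p))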
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    # image of two cats and two remotes used across the HF object-detection checks
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
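# Example invocation (a sketch; the script file name and output path are
# placeholders):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large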
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    A logger adapter for multiprocess runs: by default a record is emitted only
    on the main process, unless `main_process_only=False` is passed.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
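# Usage sketch (the accelerate state must be initialized first):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__)
#   logger.info("visible on every process", main_process_only=False)
#   logger.info("visible only on the main process")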
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {}
state_dict.pop('pixel_mean' , _lowerCamelCase )
state_dict.pop('pixel_std' , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCAmelCase : Optional[int] = key.replace(_lowerCamelCase , _lowerCamelCase )
if re.match(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(2 ) )
if layer_nb == 0:
_lowerCAmelCase : List[str] = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
_lowerCAmelCase : List[str] = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
_lowerCAmelCase : Dict = key.replace('layers.2' , 'proj_out' )
_lowerCAmelCase : Tuple = value
_lowerCAmelCase : int = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
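# Hedged illustration of the regex rewrite above (toy keys, for exposition
# only): for the hypernetwork MLPs, the layer index is remapped as
#   "mask_decoder.output_hypernetworks_mlps.0.layers.0.weight" -> "...0.proj_in.weight"
#   "mask_decoder.output_hypernetworks_mlps.0.layers.1.weight" -> "...0.layers.0.weight"
#   "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight" -> "...0.proj_out.weight"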
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="ybelkada/segment-anything" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hf_hub_download(_lowerCamelCase , f"""checkpoints/{model_name}.pth""" )
if "sam_vit_b" in model_name:
_lowerCAmelCase : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
_lowerCAmelCase : Any = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_lowerCAmelCase : Union[str, Any] = SamConfig(
vision_config=_lowerCamelCase , )
elif "sam_vit_h" in model_name:
_lowerCAmelCase : Any = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_lowerCAmelCase : str = SamConfig(
vision_config=_lowerCamelCase , )
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase , map_location='cpu' )
_lowerCAmelCase : Optional[Any] = replace_keys(_lowerCamelCase )
_lowerCAmelCase : Dict = SamImageProcessor()
_lowerCAmelCase : Optional[Any] = SamProcessor(image_processor=_lowerCamelCase )
_lowerCAmelCase : List[str] = SamModel(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = hf_model.to('cuda' )
_lowerCAmelCase : List[Any] = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
_lowerCAmelCase : Any = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert('RGB' )
_lowerCAmelCase : Optional[int] = [[[400, 650]]]
_lowerCAmelCase : Union[str, Any] = [[1]]
_lowerCAmelCase : Optional[int] = processor(images=np.array(_lowerCamelCase ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = hf_model(**_lowerCamelCase )
_lowerCAmelCase : Tuple = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579890251159668
_lowerCAmelCase : List[Any] = processor(
images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = hf_model(**_lowerCamelCase )
_lowerCAmelCase : int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712603092193604
_lowerCAmelCase : List[str] = ((75, 275, 1725, 850),)
_lowerCAmelCase : List[Any] = processor(images=np.array(_lowerCamelCase ) , input_boxes=_lowerCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_lowerCAmelCase : Tuple = hf_model(**_lowerCamelCase )
_lowerCAmelCase : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
_lowerCAmelCase : Any = [[[400, 650], [800, 650]]]
_lowerCAmelCase : Dict = [[1, 1]]
_lowerCAmelCase : str = processor(
images=np.array(_lowerCamelCase ) , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_lowerCAmelCase : str = hf_model(**_lowerCamelCase )
_lowerCAmelCase : Dict = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
_lowerCAmelCase = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Name of the original SAM checkpoint to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
type=str,
help="""Hub repo id from which the original SAM checkpoints are downloaded""",
)
_lowerCAmelCase = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
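# Example invocation (a sketch; the script filename is an assumption):
#
#   python convert_sam_original_to_hf_format.py \
#       --model_name sam_vit_h_4b8939 \
#       --pytorch_dump_folder_path ./sam-vit-huge \
#       --push_to_hub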
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: if, when running this conversion script, you get an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
if name is None:
_lowerCAmelCase : Optional[Any] = None
else:
_lowerCAmelCase : List[str] = '.' * max(0 , spaces - 2 ) + '# {:' + str(50 - spaces ) + 's}'
_lowerCAmelCase : Dict = fmt.format(_lowerCamelCase )
# Print and recurse (if needed).
if isinstance(_lowerCamelCase , _lowerCamelCase ):
if msg is not None:
print(_lowerCamelCase )
for k in val.keys():
recursive_print(_lowerCamelCase , val[k] , spaces + 2 )
elif isinstance(_lowerCamelCase , torch.Tensor ):
print(_lowerCamelCase , ':' , val.size() )
else:
print(_lowerCamelCase , ':' , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
_lowerCAmelCase : List[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:]
_lowerCAmelCase : List[str] = param.view(*_lowerCamelCase )
_lowerCAmelCase : int = param.transpose(0 , 2 )
_lowerCAmelCase : List[str] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
_lowerCAmelCase : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
_lowerCAmelCase : Tuple = param.view(*_lowerCamelCase )
_lowerCAmelCase : Optional[int] = param.transpose(0 , 1 ).contiguous()
_lowerCAmelCase : str = param.view(*_lowerCamelCase )
return param
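# Hedged shape walk-through of the >= 2.0 branch above (toy sizes, an
# assumption for exposition): with num_splits=3, num_heads=2, hidden_size=4,
# a QKV weight stored as [num_heads * num_splits * hidden_size, D] = [24, D]
# is viewed as [2, 3, 4, D], the head and split axes are swapped to
# [3, 2, 4, D], and the result is flattened back to [24, D], i.e. the
# [num_splits, num_heads, hidden_size] layout that transformers expects.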
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {}
# old versions did not store training args
_lowerCAmelCase : Optional[int] = input_state_dict.get('args' , _lowerCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_lowerCAmelCase : Any = ds_args.padded_vocab_size
_lowerCAmelCase : List[str] = ds_args.max_position_embeddings
_lowerCAmelCase : Union[str, Any] = ds_args.hidden_size
_lowerCAmelCase : Any = ds_args.num_layers
_lowerCAmelCase : str = ds_args.num_attention_heads
_lowerCAmelCase : Tuple = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_lowerCAmelCase : List[str] = config.n_head
# The hidden_size per head.
_lowerCAmelCase : int = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_lowerCAmelCase : str = input_state_dict['checkpoint_version']
else:
_lowerCAmelCase : Tuple = 0.0
# The model.
_lowerCAmelCase : List[str] = input_state_dict['model']
# The language model.
_lowerCAmelCase : int = model['language_model']
# The embeddings.
_lowerCAmelCase : int = lm['embedding']
# The word embeddings.
_lowerCAmelCase : List[Any] = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
_lowerCAmelCase : int = word_embeddings[: config.vocab_size, :]
_lowerCAmelCase : List[Any] = word_embeddings
# The position embeddings.
_lowerCAmelCase : Dict = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_lowerCAmelCase : int = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
_lowerCAmelCase : List[Any] = pos_embeddings
# The transformer.
_lowerCAmelCase : Dict = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
_lowerCAmelCase : Any = re.compile(R'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
_lowerCAmelCase : Tuple = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_lowerCAmelCase : Optional[int] = layer_re.match(_lowerCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_lowerCAmelCase : Any = int(m.group(1 ) )
# The name of the operation.
_lowerCAmelCase : int = m.group(2 )
# Is it a weight or a bias?
_lowerCAmelCase : Optional[Any] = m.group(3 )
# The name of the layer.
_lowerCAmelCase : Any = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('layernorm' ):
_lowerCAmelCase : List[Any] = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
_lowerCAmelCase : Optional[int] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_lowerCAmelCase : Union[str, Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
_lowerCAmelCase : int = torch.tensor(-1e4 , dtype=torch.floataa )
_lowerCAmelCase : Union[str, Any] = masked_bias
_lowerCAmelCase : Optional[int] = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_lowerCAmelCase : Any = out_val.transpose(0 , 1 ).contiguous()
# Store.
_lowerCAmelCase : Union[str, Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_lowerCAmelCase : Union[str, Any] = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
# Store. No change of shape.
_lowerCAmelCase : List[str] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
_lowerCAmelCase : List[str] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
_lowerCAmelCase : List[str] = megatron_to_transformers[op_name]
_lowerCAmelCase : Union[str, Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_lowerCAmelCase : Any = transformer['final_layernorm.weight']
_lowerCAmelCase : Dict = transformer['final_layernorm.bias']
# For the LM head, transformers wants the matrix tied to the word embeddings.
_lowerCAmelCase : Tuple = word_embeddings
# It should be done!
return output_state_dict
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('--print-checkpoint-structure' , action='store_true' )
parser.add_argument(
'path_to_checkpoint' , type=_lowerCamelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , )
parser.add_argument(
'--config_file' , default='' , type=_lowerCamelCase , help='An optional config json file describing the pre-trained model.' , )
_lowerCAmelCase : int = parser.parse_args()
# Extract the basename.
_lowerCAmelCase : Union[str, Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('.zip' ):
with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint:
with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase , map_location='cpu' )
else:
_lowerCAmelCase : int = torch.load(args.path_to_checkpoint , map_location='cpu' )
_lowerCAmelCase : str = input_state_dict.get('args' , _lowerCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
_lowerCAmelCase : str = 'gelu_fast'
elif ds_args.openai_gelu:
_lowerCAmelCase : int = 'gelu_new'
else:
_lowerCAmelCase : Dict = 'gelu'
else:
# in the very early days this used to be "gelu_new"
_lowerCAmelCase : str = 'gelu_new'
# Spell out all parameters in case the defaults change.
_lowerCAmelCase : List[Any] = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=_lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_lowerCamelCase , summary_activation=_lowerCamelCase , summary_proj_to_labels=_lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
else:
_lowerCAmelCase : Optional[Any] = GPTaConfig.from_json_file(args.config_file )
_lowerCAmelCase : Any = ['GPT2LMHeadModel']
# Convert.
print('Converting' )
_lowerCAmelCase : Any = convert_megatron_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_lowerCamelCase , _lowerCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
_lowerCAmelCase : str = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
_lowerCAmelCase : Any = 'gpt2'
elif tokenizer_type == "PretrainedFromHF":
_lowerCAmelCase : List[str] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
_lowerCAmelCase : List[str] = 'gpt2'
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = type(_lowerCamelCase ).__name__
_lowerCAmelCase : str = tokenizer_class
# Store the config to file.
print('Saving config' )
config.save_pretrained(_lowerCamelCase )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_lowerCamelCase )
# Store the state_dict to file.
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , 'pytorch_model.bin' )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
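# Example invocation (a sketch mirroring the header notes above):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       /path/to/checkpoint.zip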
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
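# Hedged usage sketch (not part of the test suite): generating audio with the
# checkpoint exercised in the slow tests above.
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   pipe = pipe.to("cuda")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios[0]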
| 16 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = " " ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[Any] = 0
for index, char in enumerate(_lowerCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_lowerCAmelCase : str = index + 1
elif index + 1 == len(_lowerCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
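# Expected behaviour (a sketch, using the obfuscated name defined above):
#   lowerCamelCase__("apple banana cherry")       -> ['apple', 'banana', 'cherry']
#   lowerCamelCase__("apple#banana#cherry", "#")  -> ['apple', 'banana', 'cherry']
# Note that, unlike str.split, a trailing separator does not produce an empty
# final element.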
if __name__ == "__main__":
from doctest import testmod
testmod()
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
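# Hedged usage sketch (an assumption, not part of the tests): the
# `from_config` round-trips exercised above are how UniPC is typically swapped
# into a diffusers pipeline.
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]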
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=False ,_A=True ,_A="None" ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : List[str] = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Optional[Any] = use_input_mask
_lowerCAmelCase : str = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Union[str, Any] = num_choices
_lowerCAmelCase : Optional[Any] = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Any = pos_att_type
_lowerCAmelCase : List[Any] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_lowerCAmelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = DebertaVaModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A ,attention_mask=_A ,token_type_ids=_A )[0]
_lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A )[0]
_lowerCAmelCase : Union[str, Any] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = DebertaVaForMaskedLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : List[Any] = DebertaVaForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : Dict = DebertaVaForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = DebertaVaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(
_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = DebertaVaForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : Dict = model(
_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_config_and_inputs()
(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase) = config_and_inputs
_lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = DebertaVaModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
_lowerCAmelCase : Any = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_lowerCAmelCase : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A )[0]
# compare the actual values for a slice.
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,_A ,atol=1E-4 ) ,F"""{output[:, 1:4, 1:4]}""" )
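# Hedged usage sketch (not part of the test suite): loading the checkpoint
# used in the integration test above.
#
#   import torch
#   from transformers import AutoTokenizer, DebertaV2Model
#
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#   model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)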
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
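# The converter below walks the raw Jukebox state dict and rewrites every key into
# the transformers naming scheme, using the regexes defined at the top of the
# function to recognize encoder, decoder and conditioner sub-modules.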
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
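            # fold the nested (layer, conv) indices into one flat downsample-block index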
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
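    # download any checkpoint shards that are not already present locally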
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
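    # Example invocation (illustrative; the script filename and paths are assumptions):
    #   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path jukebox-1b-lyrics-converted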
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ,return_dict=_A ).to(_A )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('google/mt5-small' )
_lowerCAmelCase : List[Any] = tokenizer('Hello there' ,return_tensors='pt' ).input_ids
_lowerCAmelCase : Any = tokenizer('Hi I am' ,return_tensors='pt' ).input_ids
_lowerCAmelCase : int = model(input_ids.to(_A ) ,labels=labels.to(_A ) ).loss
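        # scale the mean per-token loss up to a summed, negated log-likelihood score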
_lowerCAmelCase : str = -(labels.shape[-1] * loss.item())
_lowerCAmelCase : str = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
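    # The profile page embeds its data as a JSON blob inside a <script> tag;
    # slice from '{"config"' up to (but excluding) the trailing semicolon.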
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
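        # The JSON payload usually lives in the fifth <script> tag; fall back to
        # the fourth if decoding the fifth fails.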
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=4 ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Dict = use_attention_mask
_lowerCAmelCase : str = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Tuple = num_choices
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_attention_mask:
_lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : List[str] = AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_A ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = config_and_inputs
_lowerCAmelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class_name.from_pretrained('albert-base-v2' )
_lowerCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxAlbertModel.from_pretrained('albert-base-v2' )
_lowerCAmelCase : Any = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A )[0]
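        # expected output shape: (batch size, sequence length, hidden size) for albert-base-v2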
_lowerCAmelCase : List[Any] = (1, 11, 768)
self.assertEqual(output.shape ,_A )
_lowerCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,_A ,atol=1E-4 ) )
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
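            # pieces such as "9," are re-split so the trailing comma after a digit
            # becomes its own token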
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = KandinskyVaaControlnetPipeline
_UpperCAmelCase = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCAmelCase = ["image_embeds", "negative_image_embeds", "hint"]
_UpperCAmelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase = False
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 100
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : int = UNetaDConditionModel(**_A )
return model
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.dummy_unet
_lowerCAmelCase : int = self.dummy_movq
_lowerCAmelCase : str = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule='linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=_A ,set_alpha_to_one=_A ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=_A ,)
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_A ) ).to(_A )
_lowerCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_A )
# create hint
_lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('mps' ):
_lowerCAmelCase : List[Any] = torch.manual_seed(_A )
else:
_lowerCAmelCase : str = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : str = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'cpu'
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : List[Any] = self.pipeline_class(**_A )
_lowerCAmelCase : Union[str, Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : int = pipe(**self.get_dummy_inputs(_A ) )
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : List[str] = pipe(
**self.get_dummy_inputs(_A ) ,return_dict=_A ,)[0]
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : int = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
_lowerCAmelCase : Optional[int] = torch.from_numpy(np.array(_A ) ).float() / 2_5_5.0
_lowerCAmelCase : Tuple = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
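        # the hint is now scaled to [0, 1] with shape (batch, channels, height, width)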
_lowerCAmelCase : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(_A )
_lowerCAmelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa )
_lowerCAmelCase : int = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[str] = 'A robot, 4k photo'
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 )
_lowerCAmelCase, _lowerCAmelCase : Tuple = pipe_prior(
_A ,generator=_A ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
_lowerCAmelCase : Any = torch.Generator(device='cuda' ).manual_seed(0 )
_lowerCAmelCase : List[Any] = pipeline(
image_embeds=_A ,negative_image_embeds=_A ,hint=_A ,generator=_A ,num_inference_steps=100 ,output_type='np' ,)
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A ,_A )
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
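        # (first 32 bits of the fractional parts of the square roots of the first 8 primes)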
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
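        # (first 32 bits of the fractional parts of the cube roots of the first 64 primes)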
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
            # extend with 48 zero-initialized words for the message schedule
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
                    # fill in the zero-initialized message-schedule words
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
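        # right-rotate a 32-bit value by the given number of bit positions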
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
                new_scheduler.set_timesteps(_A )
                # copy over dummy past residuals (must be done after setting timesteps)
                _lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
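        # exercise both settings of lower_order_final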
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_lowerCAmelCase : Union[str, Any] = key or (lambda _A : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
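        # sift the item down, swapping with the child that has the smaller key until
        # the heap property holds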
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=24 ,_A=2 ,_A=6 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=None ,_A=1000 ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Dict = seq_length
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Any = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : Optional[int] = range_bbox
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
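        # swap coordinates where needed so that x0 <= x1 and y0 <= y1 for every box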
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Union[str, Any] = bbox[i, j, 3]
_lowerCAmelCase : str = bbox[i, j, 1]
_lowerCAmelCase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : str = bbox[i, j, 2]
_lowerCAmelCase : Optional[Any] = bbox[i, j, 0]
_lowerCAmelCase : List[str] = t
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : int = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Any = LiltModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[str] = model(_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A )
_lowerCAmelCase : Tuple = model(_A ,bbox=_A ,token_type_ids=_A )
_lowerCAmelCase : Optional[int] = model(_A ,bbox=_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.num_labels
_lowerCAmelCase : List[str] = LiltForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(
_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = LiltForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
return True
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = LiltModelTester(self )
_lowerCAmelCase : str = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = LiltModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(_A )
_lowerCAmelCase : Dict = torch.tensor([[1, 2]] ,device=_A )
_lowerCAmelCase : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(input_ids=_A ,bbox=_A )
_lowerCAmelCase : Tuple = torch.Size([1, 2, 768] )
_lowerCAmelCase : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] ,device=_A ,)
        self.assertEqual(outputs.last_hidden_state.shape ,_A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,_A ,atol=1E-3 ) )
| 16 |
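# Editor's note: a minimal, hedged sketch (not part of the test suite above) of the
# bbox-legality fix the tester performs with nested loops: coordinates are swapped so
# that x0 <= x1 and y0 <= y1 for every (x0, y0, x1, y1) box. Names are illustrative.
import torch
def normalize_boxes(bbox: torch.Tensor) -> torch.Tensor:
    # bbox: (batch, seq_len, 4) with columns (x0, y0, x1, y1)
    x_min = torch.minimum(bbox[..., 0], bbox[..., 2])
    x_max = torch.maximum(bbox[..., 0], bbox[..., 2])
    y_min = torch.minimum(bbox[..., 1], bbox[..., 3])
    y_max = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x_min, y_min, x_max, y_max], dim=-1)
boxes = torch.tensor([[[5, 9, 2, 1]]])
assert normalize_boxes(boxes).tolist() == [[[2, 1, 5, 9]]]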
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 16 | 1 |
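# Editor's note: a hedged sketch of the additive causal mask built in __init__ above:
# an upper-triangular matrix of -10000.0 that, added to attention scores before softmax,
# suppresses attention to future positions. The size here is illustrative.
import torch
seq_len = 4
mask = torch.full((seq_len, seq_len), -10000.0).triu_(1)
# row i may attend to columns 0..i only; future columns carry a large negative bias
assert mask[0, 1] == -10000.0 and mask[2, 1] == 0.0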
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [0] * len(_lowerCamelCase )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_lowerCamelCase ) ):
if indegree[i] == 0:
queue.append(_lowerCamelCase )
while queue:
_lowerCAmelCase : Any = queue.pop(0 )
cnt += 1
topo.append(_lowerCamelCase )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(_lowerCamelCase )
if cnt != len(_lowerCamelCase ):
print('Cycle exists' )
else:
print(_lowerCamelCase )
# Adjacency List of Graph
_lowerCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 16 |
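# Editor's note: a hedged, readable restatement of the Kahn's-algorithm topological sort
# above with descriptive names (the obfuscated version collapses several variables into one).
from collections import deque
def topological_sort_clear(graph: dict) -> list:
    indegree = {node: 0 for node in graph}
    for neighbors in graph.values():
        for node in neighbors:
            indegree[node] += 1
    queue = deque(node for node, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for neighbor in graph[vertex]:
            indegree[neighbor] -= 1
            if indegree[neighbor] == 0:
                queue.append(neighbor)
    if len(order) != len(graph):
        raise ValueError("Cycle exists")
    return order
assert topological_sort_clear({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) == [0, 1, 2, 3, 4, 5]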
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
| 16 | 1 |
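# Editor's note: a hedged sketch of the dtype selection above. JAX defaults integer arrays
# to int32 unless 64-bit mode is enabled, and the formatter mirrors that when converting
# numpy integers; `jax_enable_xaa` in the obfuscated code stands for `jax_enable_x64`.
import jax
import jax.numpy as jnp
import numpy as np
default_int = {"dtype": jnp.int64} if jax.config.jax_enable_x64 else {"dtype": jnp.int32}
arr = jnp.array(np.arange(3), **default_int)
print(arr.dtype)  # int32 under the default (x64-disabled) config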
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "swin2sr"
_UpperCAmelCase = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,_A=64 ,_A=1 ,_A=3 ,_A=180 ,_A=[6, 6, 6, 6, 6, 6] ,_A=[6, 6, 6, 6, 6, 6] ,_A=8 ,_A=2.0 ,_A=True ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A="gelu" ,_A=False ,_A=0.0_2 ,_A=1E-5 ,_A=2 ,_A=1.0 ,_A="1conv" ,_A="pixelshuffle" ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : int = embed_dim
_lowerCAmelCase : List[Any] = depths
_lowerCAmelCase : List[Any] = len(_A )
_lowerCAmelCase : Optional[int] = num_heads
_lowerCAmelCase : Dict = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Dict = qkv_bias
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = drop_path_rate
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Dict = use_absolute_embeddings
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Tuple = upscale
_lowerCAmelCase : Tuple = img_range
_lowerCAmelCase : Optional[Any] = resi_connection
_lowerCAmelCase : Optional[Any] = upsampler
| 16 |
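# Editor's note: a hedged sketch, assuming (as in other Swin-style models) that spatial
# dimensions must be padded to a multiple of `window_size` before window attention; the
# helper below only illustrates that relationship and is not part of the config class.
def pad_to_window_multiple(height: int, width: int, window_size: int = 8) -> tuple:
    pad_h = (window_size - height % window_size) % window_size
    pad_w = (window_size - width % window_size) % window_size
    return height + pad_h, width + pad_w
assert pad_to_window_multiple(63, 60) == (64, 64)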
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
| 16 | 1 |
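# Editor's note: a hedged restatement of the static slerp helper above with distinct
# endpoint names (the obfuscated body writes both as `xa`): spherical interpolation
# between two tensors by the angle between their flattened forms.
from math import acos, sin
import torch
def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
a, b = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(slerp(a, b, 0.5), torch.tensor([0.7071, 0.7071]), atol=1e-4)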
"""simple docstring"""
from math import isclose, sqrt
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = point_y / 4 / point_x
_lowerCAmelCase : Any = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_lowerCAmelCase : Dict = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_lowerCAmelCase : Any = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_lowerCAmelCase : Tuple = outgoing_gradient**2 + 4
_lowerCAmelCase : Dict = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_lowerCAmelCase : List[str] = (point_y - outgoing_gradient * point_x) ** 2 - 100
_lowerCAmelCase : int = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_lowerCAmelCase : str = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_lowerCAmelCase : Dict = x_minus if isclose(_lowerCamelCase , _lowerCamelCase ) else x_plus
_lowerCAmelCase : Dict = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def lowerCamelCase__ ( _lowerCamelCase = 1.4 , _lowerCamelCase = -9.6 ):
'''simple docstring'''
_lowerCAmelCase : int = 0
_lowerCAmelCase : float = first_x_coord
_lowerCAmelCase : float = first_y_coord
_lowerCAmelCase : float = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = next_point(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
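# Editor's note: a hedged sanity check for the geometry above. The boundary is the
# ellipse 4x^2 + y^2 = 100; implicit differentiation gives tangent slope -4x/y, hence
# the normal gradient y / (4x) used in next_point.
def on_ellipse(x: float, y: float, tol: float = 1e-9) -> bool:
    return abs(4 * x * x + y * y - 100) < tol
assert on_ellipse(1.4, -9.6)  # the beam's first impact point lies on the boundary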
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16 | 1 |
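# Editor's note: a hedged walk-through of one BPE merge step as implemented in `bpe`
# above, with a hypothetical merge table: `get_pairs` collects adjacent symbol pairs and
# the lowest-ranked pair present is fused, repeating until no ranked pair remains.
def get_pairs(word: tuple) -> set:
    return set(zip(word, word[1:]))
word = ("l", "o", "w", "e", "r</w>")
ranks = {("l", "o"): 0, ("lo", "w"): 1}  # hypothetical bpe_ranks
pairs = get_pairs(word)
best = min(pairs & set(ranks), key=ranks.get)
assert best == ("l", "o")  # fuse it: ('lo', 'w', 'e', 'r</w>'); then ('lo', 'w') fuses next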
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("""fixtures""")
_lowerCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCAmelCase = get_tests_dir("""fixtures/dummy-config.json""")
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = 0
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : int = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
config_dict.pop('feature_extractor_type' )
_lowerCAmelCase : str = WavaVecaFeatureExtractor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
_lowerCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
_lowerCAmelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A ,revision='aaaaaa' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(_A ):
_lowerCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
_lowerCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
_lowerCAmelCase : str = AutoFeatureExtractor.from_pretrained(_A ,trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register('custom' ,_A )
AutoFeatureExtractor.register(_A ,_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoFeatureExtractor.register(_A ,_A )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCAmelCase : Tuple = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
_lowerCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self ):
'''simple docstring'''
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = True
try:
AutoConfig.register('custom' ,_A )
AutoFeatureExtractor.register(_A ,_A )
# If remote code is not set, the default is to use local
_lowerCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_lowerCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_lowerCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
self.assertTrue(not hasattr(_A ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 16 |
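# Editor's note: a hedged, toy version of the registry pattern these Auto tests exercise
# (register a config class, then resolve an extractor by config type); ToyConfig and
# ToyExtractor are illustrative stand-ins, not transformers classes.
class ToyConfig: ...
class ToyExtractor:
    @classmethod
    def from_dict(cls, d): return cls()
EXTRACTOR_MAPPING = {}
def register(config_cls, extractor_cls):
    if config_cls in EXTRACTOR_MAPPING:
        raise ValueError(f"{config_cls.__name__} already registered")
    EXTRACTOR_MAPPING[config_cls] = extractor_cls
register(ToyConfig, ToyExtractor)
assert isinstance(EXTRACTOR_MAPPING[type(ToyConfig())].from_dict({}), ToyExtractor)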
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16 | 1 |
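# Editor's note: a hedged numeric check of the FiLM conditioning used above: a linear
# layer produces (scale, shift) chunks and modulates features as x * (1 + scale) + shift.
import torch
x = torch.ones(1, 2)
scale, shift = torch.tensor([[0.5, -1.0]]), torch.tensor([[0.1, 0.2]])
film = x * (1 + scale) + shift
assert torch.allclose(film, torch.tensor([[1.6, 0.2]]))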
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowerCAmelCase : Any = model_type_to_module_name(_lowerCamelCase )
_lowerCAmelCase : int = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(_lowerCamelCase , _lowerCamelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowerCamelCase , '__name__' , _lowerCamelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowerCAmelCase : Optional[int] = importlib.import_module('transformers' )
if hasattr(_lowerCamelCase , _lowerCamelCase ):
return getattr(_lowerCamelCase , _lowerCamelCase )
return None
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , **_lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_file_from_repo(
_lowerCamelCase , _lowerCamelCase , cache_dir=_lowerCamelCase , force_download=_lowerCamelCase , resume_download=_lowerCamelCase , proxies=_lowerCamelCase , use_auth_token=_lowerCamelCase , revision=_lowerCamelCase , local_files_only=_lowerCamelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(_lowerCamelCase , encoding='utf-8' ) as reader:
return json.load(_lowerCamelCase )
class __UpperCamelCase :
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_A )
def __lowerCamelCase ( cls ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.pop('config' ,_A )
_lowerCAmelCase : Dict = kwargs.pop('trust_remote_code' ,_A )
_lowerCAmelCase : str = True
_lowerCAmelCase, _lowerCAmelCase : str = FeatureExtractionMixin.get_feature_extractor_dict(_A ,**_A )
_lowerCAmelCase : List[Any] = config_dict.get('feature_extractor_type' ,_A )
_lowerCAmelCase : Tuple = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_lowerCAmelCase : Union[str, Any] = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_A ,_A ):
_lowerCAmelCase : Any = AutoConfig.from_pretrained(_A ,**_A )
# It could be in `config.feature_extractor_type``
_lowerCAmelCase : Optional[int] = getattr(_A ,'feature_extractor_type' ,_A )
if hasattr(_A ,'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
_lowerCAmelCase : Optional[Any] = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
_lowerCAmelCase : Optional[Any] = feature_extractor_class_from_name(_A )
_lowerCAmelCase : Optional[int] = feature_extractor_auto_map is not None
_lowerCAmelCase : str = feature_extractor_class is not None or type(_A ) in FEATURE_EXTRACTOR_MAPPING
_lowerCAmelCase : Tuple = resolve_trust_remote_code(
_A ,_A ,_A ,_A )
if has_remote_code and trust_remote_code:
_lowerCAmelCase : List[Any] = get_class_from_dynamic_module(
_A ,_A ,**_A )
_lowerCAmelCase : List[Any] = kwargs.pop('code_revision' ,_A )
if os.path.isdir(_A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_A ,**_A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_A ,**_A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_A ) in FEATURE_EXTRACTOR_MAPPING:
_lowerCAmelCase : List[str] = FEATURE_EXTRACTOR_MAPPING[type(_A )]
return feature_extractor_class.from_dict(_A ,**_A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowerCamelCase ( _A ,_A ):
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(_A ,_A )
| 16 |
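# Editor's note: a hedged sketch of the name-based dynamic import performed by
# feature_extractor_class_from_name above: resolve a class from a submodule by string,
# returning None when absent. The default package name mirrors the code above.
import importlib
def class_from_name(module_name: str, class_name: str, package: str = "transformers.models"):
    module = importlib.import_module(f".{module_name}", package)
    return getattr(module, class_name, None)
# e.g. class_from_name("vit", "ViTFeatureExtractor") when transformers is installed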
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
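The shape assertions in the tests above hinge on ResNet's overall stride of 32 (a stride-4 stem followed by three stride-2 stages), so the 32x32 test image yields a 1x1 feature map. A standalone check of that arithmetic, assuming the standard stage strides:

def expected_feature_map_size(image_size: int) -> int:
    # Stride-4 stem, then three stride-2 stages: total stride 4 * 2**3 = 32.
    size = image_size // 4
    for _ in range(3):
        size //= 2
    return size

assert expected_feature_map_size(32) == 32 // 32 == 1
assert expected_feature_map_size(224) == 224 // 32 == 7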
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
                # record the position of this Chinese token
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 |
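Underneath the mangled names, the first function in the script above is a CJK-codepoint test over the Unicode Unified Ideograph blocks. A cleaned-up, runnable equivalent of the same range check:

# Unicode ranges covering CJK Unified Ideographs and their extensions,
# mirroring the range checks in the script above.
CJK_RANGES = (
    (0x4E00, 0x9FFF),    # CJK Unified Ideographs
    (0x3400, 0x4DBF),    # Extension A
    (0x20000, 0x2A6DF),  # Extension B
    (0x2A700, 0x2B73F),  # Extension C
    (0x2B740, 0x2B81F),  # Extension D
    (0x2B820, 0x2CEAF),  # Extension E
    (0xF900, 0xFAFF),    # CJK Compatibility Ideographs
    (0x2F800, 0x2FA1F),  # Compatibility Supplement
)

def is_cjk_char(ch: str) -> bool:
    cp = ord(ch)
    return any(lo <= cp <= hi for lo, hi in CJK_RANGES)

assert is_cjk_char("中") and not is_cjk_char("a")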
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
        _lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 1 |
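The solver above is Gaussian elimination with partial pivoting followed by back substitution; the identifier mangling obscures the row/column bookkeeping. A compact restatement of the same algorithm for nonsingular square systems (variable names are editorial, not from the source):

def solve_linear_system(matrix, vector):
    """Gaussian elimination with partial pivoting, then back substitution.

    Assumes a nonsingular square system; `vector` is a column (list of [b_i]).
    """
    n = len(matrix)
    # Augmented matrix [A | b].
    aug = [list(map(float, row)) + [float(b[0])] for row, b in zip(matrix, vector)]
    for col in range(n):
        # Partial pivoting: bring the largest entry in this column to the top.
        pivot = max(range(col, n), key=lambda r: abs(aug[r][col]))
        aug[col], aug[pivot] = aug[pivot], aug[col]
        for r in range(col + 1, n):
            ratio = aug[r][col] / aug[col][col]
            for c in range(col, n + 1):
                aug[r][c] -= aug[col][c] * ratio
    # Back substitution, from the last row upwards.
    x = [0.0] * n
    for row in range(n - 1, -1, -1):
        known = sum(aug[row][c] * x[c] for c in range(row + 1, n))
        x[row] = (aug[row][n] - known) / aug[row][row]
    return [[round(v, 10)] for v in x]

# 2x + y = 5 and x + 3y = 10  =>  x = 1, y = 3
assert solve_linear_system([[2, 1], [1, 3]], [[5], [10]]) == [[1.0], [3.0]]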
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """▁"""
_lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowerCAmelCase = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_lowerCAmelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCAmelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Union[str, Any] = len(self.sp_model ) + self.fairseq_offset
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.__dict__.copy()
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : int = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
_lowerCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.encode(_A ,out_type=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : Any = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 |
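The tokenizer above reconciles two vocabularies: SentencePiece reserves id 0 for `<unk>`, while the fairseq checkpoint pins `<s>`=0, `<pad>`=1, `</s>`=2, `<unk>`=3, so every SentencePiece id is shifted up by a constant offset of 1 to make room for `<pad>`. A toy illustration of that mapping, with made-up piece ids and no SentencePiece dependency:

# Toy SentencePiece-style vocab: id 0 is <unk>, real pieces start at 1.
SPM_PIECE_TO_ID = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1  # every SPM id shifts up by one to make room for <pad>

def token_to_id(token: str) -> int:
    if token in FAIRSEQ_TOKENS_TO_IDS:  # pinned special tokens first
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = SPM_PIECE_TO_ID.get(token, 0)
    # SPM returns 0 for unknown pieces; map that back to fairseq's <unk>.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]

assert token_to_id("<pad>") == 1
assert token_to_id(",") == 4      # 3 + offset
assert token_to_id("xyzzy") == 3  # unknown piece -> <unk>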
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
                # record the position of this Chinese token
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
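The `add_sub_symbol` logic (this script appears twice in this section, once as a sample and once as its paired context) greedily matches the longest LTP-segmented word against the BERT pieces and rewrites the trailing characters with a `##` prefix, which the whole-word-masking collator later keys on. A standalone demonstration of the rewriting — unlike the original, this sketch skips the per-character CJK check:

def mark_whole_words(pieces, word_set):
    """Greedy longest-match: prefix continuation pieces of a known word with ##."""
    pieces = list(pieces)
    max_len = max((len(w) for w in word_set), default=0)
    start = 0
    while start < len(pieces):
        matched = False
        for i in range(min(len(pieces) - start, max_len), 1, -1):
            if "".join(pieces[start:start + i]) in word_set:
                for j in range(start + 1, start + i):
                    pieces[j] = "##" + pieces[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return pieces

# "今天" and "天气" are single LTP words, so their second characters
# become continuation pieces.
pieces = mark_whole_words(["今", "天", "天", "气", "好"], {"今天", "天气"})
assert pieces == ["今", "##天", "天", "##气", "好"]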
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "blenderbot-small"
_UpperCAmelCase = ["past_key_values"]
_UpperCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,_A=5_0265 ,_A=512 ,_A=8 ,_A=2048 ,_A=16 ,_A=8 ,_A=2048 ,_A=16 ,_A=0.0 ,_A=0.0 ,_A=True ,_A=True ,_A="gelu" ,_A=512 ,_A=0.1 ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1 ,_A=False ,_A=0 ,_A=1 ,_A=2 ,_A=2 ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : List[str] = d_model
_lowerCAmelCase : List[Any] = encoder_ffn_dim
_lowerCAmelCase : int = encoder_layers
_lowerCAmelCase : List[str] = encoder_attention_heads
_lowerCAmelCase : List[Any] = decoder_ffn_dim
_lowerCAmelCase : Dict = decoder_layers
_lowerCAmelCase : Tuple = decoder_attention_heads
_lowerCAmelCase : str = dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : str = activation_dropout
_lowerCAmelCase : Dict = activation_function
_lowerCAmelCase : Optional[Any] = init_std
_lowerCAmelCase : Optional[Any] = encoder_layerdrop
_lowerCAmelCase : Tuple = decoder_layerdrop
_lowerCAmelCase : List[str] = use_cache
_lowerCAmelCase : Union[str, Any] = encoder_layers
_lowerCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,is_encoder_decoder=_A ,decoder_start_token_id=_A ,forced_eos_token_id=_A ,**_A ,)
class __UpperCamelCase ( a__ ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase : str = {0: 'batch'}
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
_lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase, _lowerCAmelCase : Any = self.num_layers
for i in range(_A ):
_lowerCAmelCase : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : int = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowerCAmelCase : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = super().outputs
else:
_lowerCAmelCase : Optional[int] = super(_A ,self ).outputs
if self.use_past:
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(_A ):
_lowerCAmelCase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
# Generate decoder inputs
_lowerCAmelCase : List[str] = seq_length if not self.use_past else 1
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
_lowerCAmelCase : str = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : Optional[int] = dict(**_A ,**_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase, _lowerCAmelCase : str = common_inputs['input_ids'].shape
_lowerCAmelCase : Tuple = common_inputs['decoder_input_ids'].shape[1]
_lowerCAmelCase, _lowerCAmelCase : int = self.num_attention_heads
_lowerCAmelCase : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : List[Any] = decoder_seq_length + 3
_lowerCAmelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_A ,_A )] ,dim=1 )
_lowerCAmelCase : str = []
            # If both the encoder and decoder layer counts are present in the model configuration, both are considered
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self.num_layers
_lowerCAmelCase : List[Any] = min(_A ,_A )
_lowerCAmelCase : str = max(_A ,_A ) - min_num_layers
_lowerCAmelCase : Any = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
_lowerCAmelCase : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A ,_A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase, _lowerCAmelCase : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase : str = seqlen + 2
_lowerCAmelCase, _lowerCAmelCase : Dict = self.num_layers
_lowerCAmelCase, _lowerCAmelCase : Any = self.num_attention_heads
_lowerCAmelCase : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : Optional[int] = common_inputs['attention_mask'].dtype
_lowerCAmelCase : Dict = torch.cat(
[common_inputs['attention_mask'], torch.ones(_A ,_A ,dtype=_A )] ,dim=1 )
_lowerCAmelCase : str = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : str = compute_effective_axis_dimension(
_A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase : Optional[int] = tokenizer.num_special_tokens_to_add(_A )
_lowerCAmelCase : str = compute_effective_axis_dimension(
_A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase : str = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase : Tuple = dict(tokenizer(_A ,return_tensors=_A ) )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
elif self.task == "causal-lm":
_lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
else:
_lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Optional[Any] = super()._flatten_past_key_values_(_A ,_A ,_A ,_A )
else:
_lowerCAmelCase : str = super(_A ,self )._flatten_past_key_values_(
_A ,_A ,_A ,_A )
| 16 |
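Most of the ONNX config above is devoted to materialising dummy `past_key_values`: one (key, value) pair per decoder layer, each of shape (batch, num_heads, past_length, head_dim), with the attention mask widened by the past length. The shape bookkeeping in isolation — the sizes below are arbitrary examples, not tied to any checkpoint:

import torch

# Hypothetical model dimensions, for illustration only.
batch, num_heads, hidden_size, seq_len, num_layers = 2, 16, 512, 7, 8
head_dim = hidden_size // num_heads

# One (key, value) pair per decoder layer; the exporter above likewise
# extends the attention mask by the past length.
past_shape = (batch, num_heads, seq_len + 2, head_dim)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_layers)]
attention_mask = torch.ones(batch, seq_len)
attention_mask = torch.cat([attention_mask, torch.ones(batch, seq_len + 2)], dim=1)

assert past_key_values[0][0].shape == (2, 16, 9, 32)
assert attention_mask.shape == (2, 16)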
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
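Both test classes above get reproducible latents by seeding a `torch.Generator` on the target device, falling back to the global RNG on MPS, where device-local generators are not supported. That pattern on its own:

import torch

def make_generator(device: str, seed: int = 0):
    # MPS does not support device-local generators, so fall back to the
    # global RNG there; everywhere else, seed a generator on the device.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
noise = torch.randn(1, 4, 32, 32, generator=gen)
print(noise.shape)  # torch.Size([1, 4, 32, 32])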
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "xlnet"
_UpperCAmelCase = ["mems"]
_UpperCAmelCase = {
"n_token": "vocab_size", # Backward compatibility
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,_A=3_2000 ,_A=1024 ,_A=24 ,_A=16 ,_A=4096 ,_A="gelu" ,_A=True ,_A="bi" ,_A=0.0_2 ,_A=1E-12 ,_A=0.1 ,_A=512 ,_A=None ,_A=True ,_A=False ,_A=False ,_A=-1 ,_A=False ,_A="last" ,_A=True ,_A="tanh" ,_A=0.1 ,_A=5 ,_A=5 ,_A=5 ,_A=1 ,_A=2 ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Dict = n_layer
_lowerCAmelCase : int = n_head
if d_model % n_head != 0:
raise ValueError(F"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"""`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})""" )
_lowerCAmelCase : List[Any] = d_model // n_head
_lowerCAmelCase : List[Any] = ff_activation
_lowerCAmelCase : List[str] = d_inner
_lowerCAmelCase : int = untie_r
_lowerCAmelCase : int = attn_type
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Optional[Any] = dropout
_lowerCAmelCase : Any = mem_len
_lowerCAmelCase : List[str] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : Any = clamp_len
_lowerCAmelCase : Dict = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : Optional[Any] = summary_use_proj
_lowerCAmelCase : int = summary_activation
_lowerCAmelCase : str = summary_last_dropout
_lowerCAmelCase : Any = start_n_top
_lowerCAmelCase : List[Any] = end_n_top
_lowerCAmelCase : Dict = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Any = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
' instead.' ,_A ,)
_lowerCAmelCase : List[Any] = kwargs['use_cache']
_lowerCAmelCase : str = use_mems_eval
_lowerCAmelCase : Optional[int] = use_mems_train
super().__init__(pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,**_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 16 |
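The config above derives the per-head dimension as `d_model // n_head` and rejects an inconsistent explicit `d_head`. That validation, extracted into a small helper (the function name is illustrative):

from typing import Optional

def derive_head_dim(d_model: int, n_head: int, d_head: Optional[int] = None) -> int:
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    if d_head is not None and d_head != d_model // n_head:
        raise ValueError(f"`d_head` ({d_head}) should equal `d_model // n_head` ({d_model // n_head})")
    return d_model // n_head

assert derive_head_dim(1024, 16) == 64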
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
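Under the identifier mangling (`baseaa` for `base64`, `aaaencode` for `a85encode`), the block above is a thin wrapper around the standard library's Ascii85 codec. De-obfuscated, with a round-trip check:

import base64

def ascii85_encode(text: str) -> bytes:
    return base64.a85encode(text.encode("utf-8"))

def ascii85_decode(payload: bytes) -> str:
    return base64.a85decode(payload).decode("utf-8")

encoded = ascii85_encode("Hello World!")
assert ascii85_decode(encoded) == "Hello World!"  # lossless round trip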
"""simple docstring"""
from __future__ import annotations
_lowerCAmelCase = list[tuple[int, int]]
_lowerCAmelCase = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_lowerCAmelCase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class __UpperCamelCase :
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pos_x
_lowerCAmelCase : int = pos_y
_lowerCAmelCase : Union[str, Any] = (pos_y, pos_x)
_lowerCAmelCase : List[str] = goal_x
_lowerCAmelCase : Optional[Any] = goal_y
_lowerCAmelCase : Optional[Any] = g_cost
_lowerCAmelCase : str = parent
_lowerCAmelCase : Dict = self.calculate_heuristic()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = abs(self.pos_x - self.goal_x )
_lowerCAmelCase : Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self ,_A ):
'''simple docstring'''
return self.f_cost < other.f_cost
class __UpperCamelCase :
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_A )
_lowerCAmelCase : int = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,9_9999 ,_A )
_lowerCAmelCase : List[str] = [self.start]
_lowerCAmelCase : list[Node] = []
_lowerCAmelCase : Union[str, Any] = False
def __lowerCamelCase ( self ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase : List[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase : Any = True
return self.retrace_path(_A )
self.closed_nodes.append(_A )
_lowerCAmelCase : Dict = self.get_successors(_A )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_A )
else:
# retrieve the best current path
_lowerCAmelCase : Any = self.open_nodes.pop(self.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_A )
else:
self.open_nodes.append(_A )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for action in delta:
_lowerCAmelCase : List[str] = parent.pos_x + action[1]
_lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_A ,_A ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_A ,) )
return successors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = node
_lowerCAmelCase : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
_lowerCAmelCase = (0, 0)
_lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
_lowerCAmelCase = GreedyBestFirst(init, goal)
_lowerCAmelCase = greedy_bf.search()
if path:
for pos_x, pos_y in path:
_lowerCAmelCase = 2
for elem in grid:
print(elem)
| 16 |
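Note that the search above is greedy best-first, not A*: although `g_cost` is tracked, `f_cost` is set to the Manhattan heuristic alone and `__lt__` sorts the frontier by it, so the returned path is not guaranteed to be shortest. A tiny illustration of that frontier ordering:

import heapq

def manhattan(pos, goal):
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

# Greedy best-first orders the frontier by h alone, ignoring path cost g,
# so it is fast but not optimal (A* would order by g + h instead).
goal = (6, 6)
frontier = [(manhattan(p, goal), p) for p in [(0, 0), (3, 4), (5, 6)]]
heapq.heapify(frontier)
assert heapq.heappop(frontier)[1] == (5, 6)  # closest-looking node expands first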
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        # `json` and `normalizers` come from the imports at the top of this file,
        # as in the upstream BertTokenizerFast this class mirrors.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
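# A minimal, self-contained sketch (toy ids, not a real vocabulary) of the
# layout the two methods above produce for a sentence pair; 101/102 are the
# conventional [CLS]/[SEP] ids in BERT checkpoints, hard-coded here only for
# illustration.
if __name__ == "__main__":
    cls_id, sep_id = 101, 102
    ids_a, ids_b = [2023, 2003], [1037, 3231]
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    print(input_ids)       # [101, 2023, 2003, 102, 1037, 3231, 102]
    print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1]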
| 16 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCAmelCase = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
_lowerCAmelCase = dataset.iloc[:, 1:2].values
_lowerCAmelCase = dataset.iloc[:, 2].values
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCAmelCase = PolynomialFeatures(degree=4)
_lowerCAmelCase = poly_reg.fit_transform(X)
_lowerCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial():
    '''simple docstring'''
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Polynomial Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()
if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
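    # Sketch (not in the original script): a plain linear fit on the same data
    # makes the benefit of the degree-4 polynomial features visible at x = 5.5.
    lin_reg = LinearRegression()
    lin_reg.fit(X, y)
    print(lin_reg.predict([[5.5]]))  # linear baseline
    print(pol_reg.predict(poly_reg.fit_transform([[5.5]])))  # ~132148.44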
| 16 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensorb = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensorb.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensorb.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensorb[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensorb[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
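# Single-process sketch of the invariant checked above: `pad_across_processes`
# zero-pads dim 0 to the largest size across ranks, at the end by default or
# at the front with `pad_first=True`. Plain torch stands in here so the idea
# can be verified without a multi-GPU launch.
if __name__ == "__main__":
    a = torch.ones(2, 3)
    max_len = 4  # pretend another rank holds a (4, 3) tensor
    padded = torch.zeros(max_len, 3)
    padded[: a.shape[0]] = a  # default: pad at the end
    padded_first = torch.zeros(max_len, 3)
    padded_first[max_len - a.shape[0] :] = a  # pad_first=True: pad at the front
    assert torch.all(padded[a.shape[0] :] == 0)
    assert torch.all(padded_first[: max_len - a.shape[0]] == 0)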
| 16 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
_lowerCAmelCase = {
"""camembert-base""": 5_1_2,
}
_lowerCAmelCase = """▁"""
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="</s>" ,_A="</s>" ,_A="<s>" ,_A="<unk>" ,_A="<pad>" ,_A="<mask>" ,_A=["<s>NOTUSED", "</s>NOTUSED"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : int = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,cls_token=_A ,pad_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_lowerCAmelCase : Tuple = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
_lowerCAmelCase : Optional[int] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
_lowerCAmelCase : List[str] = len(self.fairseq_tokens_to_ids )
_lowerCAmelCase : Union[str, Any] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : int = [self.cls_token_id]
_lowerCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self ,text ):
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self ,token ):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self ,index ):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self ,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
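# Toy illustration (made-up piece table, not a real sentencepiece model) of
# the fairseq-offset mapping used by `_convert_token_to_id` above: special
# tokens occupy the first ids and every sentencepiece id is shifted by the
# size of that table.
if __name__ == "__main__":
    fairseq = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
    offset = len(fairseq)
    sp_piece_to_id = {"▁hello": 7}  # stand-in for the sentencepiece model
    def to_id(token):
        if token in fairseq:
            return fairseq[token]
        return offset + sp_piece_to_id.get(token, 0)  # sp id 0 plays unk here
    print(to_id("<pad>"), to_id("▁hello"))  # 1 11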
| 16 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations( sequence ):
    '''simple docstring'''
    create_state_space_tree(sequence ,[] ,0 ,[0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence ,current_sequence ,index ,index_used ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence ,current_sequence ,index + 1 ,index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
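# Optional cross-check (`collect_permutations` is ad hoc, not part of the
# original script): a collecting variant of the same backtracking search
# agrees with itertools.permutations.
if __name__ == "__main__":
    from itertools import permutations

    def collect_permutations(seq):
        out = []

        def rec(current, used):
            if len(current) == len(seq):
                out.append(tuple(current))
                return
            for i in range(len(seq)):
                if not used[i]:
                    used[i] = True
                    current.append(seq[i])
                    rec(current, used)
                    current.pop()
                    used[i] = False

        rec([], [False] * len(seq))
        return out

    assert sorted(collect_permutations([3, 1, 2, 4])) == sorted(permutations([3, 1, 2, 4]))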
| 16 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.31_4462  # Unit - J mol-1 K-1
def pressure_of_gas_system( moles ,kelvin ,volume ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles ,kelvin ,pressure ):
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
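    # Quick usage check of the two helpers above (inputs chosen arbitrarily).
    print(pressure_of_gas_system(2, 100, 5))  # 332.57848
    print(volume_of_gas_system(0.5, 273, 0.004))  # 283731.01575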
| 16 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' ,True )
        in_order = kwargs.pop('in_order' ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def get_logger( name ,log_level = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' ,None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger ,{} )
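# Illustrative usage of the adapter above (not part of the upstream module):
# an Accelerator (or PartialState) must exist before logging, and on a single
# CPU process this runs as-is.
if __name__ == "__main__":
    from accelerate import Accelerator
    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed on the main process only")
    logger.info("printed by every process, in order", main_process_only=False, in_order=True)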
| 16 | 1 |
"""simple docstring"""
def valid_coloring( neighbours ,colored_vertices ,color ):
    '''simple docstring'''
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph ,max_colors ,colored_vertices ,index ):
    '''simple docstring'''
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] ,colored_vertices ,i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph ,max_colors ,colored_vertices ,index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph ,max_colors ):
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph ,max_colors ,colored_vertices ,0 ):
        return colored_vertices
    return []
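# Example run (graph and color count made up): a 4-cycle given as an
# adjacency matrix is 2-colorable, so `color` returns a valid assignment.
if __name__ == "__main__":
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle, 2))  # e.g. [0, 1, 0, 1]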
| 16 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 1 |
"""simple docstring"""
from math import ceil, sqrt
def solution( limit = 1000000 ):
    '''simple docstring'''
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F'''{solution() = }''')
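    # Independent brute-force cross-check (ad hoc, not part of the original
    # solution): enumerate laminae directly for a small limit and compare.
    def brute_force_solution(limit):
        count = 0
        for outer in range(3, limit // 4 + 2):
            for hole in range(outer - 2, 0, -2):
                if outer**2 - hole**2 > limit:
                    break
                count += 1
        return count

    assert brute_force_solution(1_000) == solution(1_000)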
| 16 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
_lowerCAmelCase = OrderedDict(
[
        # Model for Image classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
_lowerCAmelCase = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
_lowerCAmelCase = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
_lowerCAmelCase = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
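# Minimal usage sketch for the auto classes registered above: the mapping
# resolves a checkpoint's config type to the right Flax model class. Assumes
# `flax` is installed; weights download on first run.
if __name__ == "__main__":
    from transformers import FlaxAutoModel
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # FlaxBertModel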
| 16 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
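# Standalone sketch of the denoising loop the tests above exercise; the
# "model" is a stand-in returning a scaled copy of the sample, purely to
# show the set_timesteps / step API end to end.
if __name__ == "__main__":
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = 0.1 * sample  # stand-in for a real UNet prediction
        sample = scheduler.step(model_output, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])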
| 16 | 1 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = None
if token is not None:
_lowerCAmelCase : Union[str, Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
_lowerCAmelCase : List[str] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_lowerCAmelCase : List[Any] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
_lowerCAmelCase : int = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
_lowerCAmelCase : Union[str, Any] = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = requests.get(url + f"""&page={i + 2}""" , headers=_lowerCamelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Dict = None
if token is not None:
_lowerCAmelCase : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
_lowerCAmelCase : Optional[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_lowerCAmelCase : List[Any] = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
_lowerCAmelCase : Union[str, Any] = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
_lowerCAmelCase : int = math.ceil((result['total_count'] - 100) / 100 )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = requests.get(url + f"""&page={i + 2}""" , headers=_lowerCamelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = None
if token is not None:
_lowerCAmelCase : List[Any] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
_lowerCAmelCase : Union[str, Any] = requests.get(_lowerCamelCase , headers=_lowerCamelCase , allow_redirects=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = result.headers['Location']
_lowerCAmelCase : Optional[Any] = requests.get(_lowerCamelCase , allow_redirects=_lowerCamelCase )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , f"""{artifact_name}.zip""" )
with open(_lowerCamelCase , 'wb' ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Any = []
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Union[str, Any] = None
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_lowerCamelCase ) as f:
for line in f:
_lowerCAmelCase : Any = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase : Any = line[: line.index(': ' )]
_lowerCAmelCase : List[str] = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
_lowerCAmelCase : List[Any] = line[len('FAILED ' ) :]
failed_tests.append(_lowerCamelCase )
elif filename == "job_name.txt":
_lowerCAmelCase : List[str] = line
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
f"""`errors` and `failed_tests` should have the same number of elements. Got {len(_lowerCamelCase )} for `errors` """
f"""and {len(_lowerCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.' )
_lowerCAmelCase : Dict = None
if job_name and job_links:
_lowerCAmelCase : Tuple = job_links.get(_lowerCamelCase , _lowerCamelCase )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase : List[str] = [x + [y] + [job_link] for x, y in zip(_lowerCamelCase , _lowerCamelCase )]
return result
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
_lowerCAmelCase : int = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for p in os.listdir(_lowerCamelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_lowerCamelCase , job_links=_lowerCamelCase ) )
return errors
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase : Optional[Any] = counter.most_common()
_lowerCAmelCase : Optional[int] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase : Optional[int] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    _lowerCAmelCase : List[Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_lowerCamelCase ) )
return r
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = test.split('::' )[0]
if test.startswith('tests/models/' ):
_lowerCAmelCase : List[str] = test.split('/' )[2]
else:
_lowerCAmelCase : Tuple = None
return test
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase : List[Any] = [x for x in logs if x[2] is not None]
_lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
_lowerCAmelCase : Optional[Any] = {}
for test in tests:
_lowerCAmelCase : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase : List[str] = counter.most_common()
_lowerCAmelCase : str = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase : Dict = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase : Union[str, Any] = {'count': n_errors, 'errors': error_counts}
    _lowerCAmelCase : Optional[Any] = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_lowerCamelCase ) )
return r
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = '| no. | error | status |'
_lowerCAmelCase : Any = '|-:|:-|:-|'
_lowerCAmelCase : Dict = [header, sep]
for error in reduced_by_error:
        count = reduced_by_error[error]['count']
_lowerCAmelCase : Any = f"""| {count} | {error[:100]} | |"""
lines.append(_lowerCamelCase )
return "\n".join(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = '| model | no. of errors | major error | count |'
_lowerCAmelCase : Dict = '|-:|-:|-:|-:|'
_lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items() )[0]
_lowerCAmelCase : Union[str, Any] = f"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(_lowerCamelCase )
return "\n".join(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
_lowerCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCAmelCase = get_job_links(args.workflow_run_id, token=args.token)
_lowerCAmelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCAmelCase = k.find(""" / """)
_lowerCAmelCase = k[index + len(""" / """) :]
_lowerCAmelCase = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCAmelCase = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCAmelCase = reduce_by_error(errors)
_lowerCAmelCase = reduce_by_model(errors)
_lowerCAmelCase = make_github_table(reduced_by_error)
_lowerCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
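# Hedged invocation sketch (the script name and run id are placeholders; the flags match the
# argparse definitions above):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_error_stats \
#       --token "$GITHUB_TOKEN"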
| 16 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
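# Hedged examples traced through the branches above (not verified against a real checkpoint):
#
#   replace_key('prime_state_ln.weight')              -> 'encoder.final_layer_norm.weight'
#   replace_key('vqvae.encoders.0.level_blocks.0.k')  -> 'vqvae.encoders.0.level_blocks.0.codebook'
#   replace_key('prior.x_out.weight')                 -> 'prior.fc_proj_out.weight'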
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
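# Hedged invocation sketch (the script name is a placeholder; the flags and defaults match
# the argparse definitions above):
#
#   python convert_jukebox.py \
#       --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted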
| 16 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=13 ,_A=30 ,_A=2 ,_A=3 ,_A=True ,_A=True ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[Any] = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_A ,initializer_range=self.initializer_range ,)
return config, pixel_values
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = FlaxViTModel(config=_A )
_lowerCAmelCase : str = model(_A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : str = (self.image_size, self.image_size)
_lowerCAmelCase : Optional[Any] = (self.patch_size, self.patch_size)
_lowerCAmelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.type_sequence_label_size
_lowerCAmelCase : Optional[Any] = FlaxViTForImageClassification(config=_A )
_lowerCAmelCase : str = model(_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : str = 1
_lowerCAmelCase : Tuple = FlaxViTForImageClassification(_A )
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : List[Any] = model(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        _lowerCAmelCase, _lowerCAmelCase = config_and_inputs
_lowerCAmelCase : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = FlaxViTModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(_A )
_lowerCAmelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[str] = self._prepare_for_class(_A ,_A )
_lowerCAmelCase : Optional[Any] = model_class(_A )
@jax.jit
def model_jitted(_A ,**_A ):
return model(pixel_values=_A ,**_A )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase : Any = model_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase : Dict = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) ,len(_A ) )
for jitted_output, output in zip(_A ,_A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Any = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_lowerCAmelCase : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_A )
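# Hedged inference sketch (mirrors the slow test above; the checkpoint name and the
# channels-first dummy input are taken directly from it):
import numpy as np

from transformers import FlaxViTForImageClassification

model = FlaxViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
outputs = model(np.ones((1, 3, 224, 224)))
print(outputs.logits.shape)  # (1, 1000) for the ImageNet-1k classification head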
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = "swin"
_UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,_A=224 ,_A=4 ,_A=3 ,_A=96 ,_A=[2, 2, 6, 2] ,_A=[3, 6, 12, 24] ,_A=7 ,_A=4.0 ,_A=True ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A="gelu" ,_A=False ,_A=0.0_2 ,_A=1E-5 ,_A=32 ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Optional[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : Any = len(_A )
_lowerCAmelCase : Union[str, Any] = num_heads
_lowerCAmelCase : Dict = window_size
_lowerCAmelCase : Dict = mlp_ratio
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : int = drop_path_rate
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : Tuple = use_absolute_embeddings
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(_A ) - 1) )
_lowerCAmelCase : Optional[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 ,len(_A ) + 1 )]
_lowerCAmelCase, _lowerCAmelCase : Tuple = get_aligned_output_features_output_indices(
out_features=_A ,out_indices=_A ,stage_names=self.stage_names )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
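# Hedged usage sketch: the derived attributes follow directly from the constructor above.
from transformers import SwinConfig

config = SwinConfig()              # embed_dim=96, depths=[2, 2, 6, 2] by default
print(config.hidden_size)          # 96 * 2 ** 3 == 768, the channel dim after the last stage
print(config.num_attention_heads)  # [3, 6, 12, 24], resolved to num_heads via attribute_map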
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
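# Hedged usage sketch (standard slow-tokenizer API; the checkpoint name comes from the
# pretrained vocab map above):
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained('xlnet-base-cased')
ids = tok('Hello world').input_ids
# Per build_inputs_with_special_tokens above, a single sequence ends with <sep> <cls>:
assert ids[-2:] == [tok.sep_token_id, tok.cls_token_id]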
| 16 | 1 |
"""simple docstring"""
import os
def lowerCamelCase__ ( ):
'''simple docstring'''
    _lowerCAmelCase : List[Any] = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
with open(_lowerCamelCase ) as file_hand:
return str(sum(int(_lowerCamelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 0-ed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(_A ).hash ,hashlib.sha256(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
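# Hedged note: the round constants and padding scheme above are those of SHA-256, so the
# digest should match hashlib for any input (class name kept as it appears in the test
# and in main() above):
#
#   SHAaaa(b'Test String').hash == hashlib.sha256(b'Test String').hexdigest()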
| 16 | 1 |
"""simple docstring"""
from itertools import product
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = sides_number
_lowerCAmelCase : Optional[Any] = max_face_number * dice_number
_lowerCAmelCase : List[str] = [0] * (max_total + 1)
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : Any = range(_lowerCamelCase , max_face_number + 1 )
for dice_numbers in product(_lowerCamelCase , repeat=_lowerCamelCase ):
_lowerCAmelCase : Any = sum(_lowerCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
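# Hedged sanity check of the distribution above (a tiny case computable by hand): a single
# 2-sided die yields frequencies [0, 1, 1], one way each to total 1 or 2, and in general
# sum(frequencies) == sides_number ** dice_number.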
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_lowerCAmelCase : Dict = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[int] = 9
_lowerCAmelCase : Optional[Any] = 4 * 9
_lowerCAmelCase : Dict = 6
for peter_total in range(_lowerCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_lowerCAmelCase : Tuple = (4**9) * (6**6)
_lowerCAmelCase : Union[str, Any] = peter_wins_count / total_games_number
_lowerCAmelCase : Any = round(_lowerCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
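# Hedged usage sketch (get_top and delete_item follow the internal references above, while
# insert_item and the class name Heap are assumptions about the renamed identifiers in
# this dump):
#
#   heap = Heap()                # min-heap by default, per the _cmp comparison above
#   for v in (3, 1, 2):
#       heap.insert_item(v, v)   # hypothetical signature: (item, item_weight)
#   heap.get_top()               # -> [1, 1], the entry with the smallest key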
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class __UpperCamelCase ( a__ ):
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
super().__init__(*_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_A )
_lowerCAmelCase : Optional[int] = self.values[key]
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
sum(self.charge_factor - len(_A ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(_A ) == 0
):
return key
return super()._collision_resolution(_A ,_A )
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 16 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "camembert"
def __init__( self ,_A=3_0522 ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=2 ,_A=0.0_2 ,_A=1E-12 ,_A=1 ,_A=0 ,_A=2 ,_A="absolute" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,**_A )
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Dict = use_cache
_lowerCAmelCase : int = classifier_dropout
class __UpperCamelCase ( a__ ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
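# Illustrative note (an addition, not upstream code): the dynamic-axes mapping
# returned by the OnnxConfig above tells the ONNX exporter which input
# dimensions may vary at runtime; for a plain sequence task it reduces to the
# dict built below. The `_demo_` name is hypothetical.
_demo_onnx_dynamic_axes = OrderedDict(
    [
        ('input_ids', {0: 'batch', 1: 'sequence'}),
        ('attention_mask', {0: 'batch', 1: 'sequence'}),
    ]
)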
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
                'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
                isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(_A ,(str, bytes, type(None )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _lowerCAmelCase : List[str] = {'dtype': jnp.int64}
            else:
                _lowerCAmelCase : Tuple = {'dtype': jnp.int32}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            _lowerCAmelCase : Any = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
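# Illustrative sketch (an addition, not upstream code): `_tensorize` above
# picks the integer width from JAX's x64 flag, since JAX defaults to 32-bit
# precision unless x64 mode is enabled; see
# https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html
def _demo_default_int_dtype():
    import jax
    import jax.numpy as jnp

    dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    return jnp.array([1, 2, 3], dtype=dtype)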
| 16 | 1 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == _lowerCamelCase
    if _lowerCamelCase != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
        return 50 if isinstance(self.scheduler ,DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
        _lowerCAmelCase : Dict = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
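# Usage sketch (an addition, not upstream code) for the spherical interpolation
# defined in the static method above: slerp blends two noise tensors along the
# unit sphere, so the interpolant keeps a sensible norm, unlike plain linear
# interpolation; that is why it is preferred for blending diffusion latents.
def _demo_slerp():
    import torch
    from math import acos, sin

    xa, xb, alpha = torch.randn(64), torch.randn(64), 0.5
    theta = acos(torch.dot(xa, xb) / (torch.norm(xa) * torch.norm(xb)))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)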
| 16 | 1 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
_lowerCAmelCase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Github(os.environ['GITHUB_TOKEN'] )
_lowerCAmelCase : Union[str, Any] = g.get_repo('huggingface/transformers' )
_lowerCAmelCase : List[Any] = repo.get_issues(state='open' )
for issue in open_issues:
        _lowerCAmelCase : Optional[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        _lowerCAmelCase : Tuple = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
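# Sketch (illustrative addition) of the staleness test the loop above applies:
# an issue qualifies for closing when the bot already commented, it has seen no
# activity for more than 7 days, and it is at least 30 days old.
def _demo_is_stale(updated_at, created_at):
    from datetime import datetime

    now = datetime.utcnow()
    return (now - updated_at).days > 7 and (now - created_at).days >= 30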
| 16 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            _lowerCAmelCase : Dict = word.replace(BPE_TOKEN_MERGES ,'' )
        _lowerCAmelCase : str = word.replace(' ' ,BPE_TOKEN_VOCAB )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
        _lowerCAmelCase : int = ''.join(string.split(BPE_TOKEN_VOCAB ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return (vocab_file, merges_file)
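# Worked example (illustrative addition): BPE repeatedly merges the
# lowest-ranked adjacent symbol pair, and `get_pairs` above enumerates the
# candidate bigrams. The call uses the helper's upstream name, just as the
# `bpe` method itself does; for ("l", "o", "w", "</w>") it yields
# {("l", "o"), ("o", "w"), ("w", "</w>")}.
def _demo_get_pairs():
    return get_pairs(('l', 'o', 'w', '</w>') )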
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Tuple = self.act(self.wi_0(_A ) )
        _lowerCAmelCase : Optional[int] = self.wi_1(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Optional[int] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 ,keepdim=True )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
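# Minimal sketch (an addition, not upstream code) of the FiLM conditioning
# implemented by TaFiLMLayer above: a conditioning embedding is projected to
# per-channel (scale, shift) halves and applied as x * (1 + scale) + shift.
# Shapes are assumptions for the demo.
def _demo_film(features=8):
    import torch

    x = torch.randn(2, 10, features)               # (batch, seq, features)
    scale_shift = torch.randn(2, 1, 2 * features)  # projected conditioning
    scale, shift = torch.chunk(scale_shift, 2, dim=-1)
    return x * (1 + scale) + shift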
| 16 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_lowerCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if "://" in dataset_path:
_lowerCAmelCase : int = dataset_path.split('://' )[1]
return dataset_path
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = not is_remote_filesystem(_lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_lowerCamelCase ) , fs._strip_protocol(_lowerCamelCase ) )
else:
fs.mv(_lowerCamelCase , _lowerCamelCase , recursive=_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Optional[int] = threading.Lock()
| 16 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
    _lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
    for row in range(size ):
        for col in range(size ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
        _lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
    for col in range(1 , size ):
        for row in range(col ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
    _lowerCAmelCase : Matrix = [[0 for _ in range(size )] for _ in range(size )]
    _lowerCAmelCase : Matrix = [[0] for _ in range(size )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
        for col in range(size ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
    _lowerCAmelCase : List[Any] = solve(matrix , vector )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
    _lowerCAmelCase : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
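# Worked example (illustrative addition), mirroring the Project Euler 101
# statement: fitting the first two values of u(n) = n**3 with a line gives an
# "optimum polynomial" whose first incorrect term is BOP(3) = 15 rather than
# u(3) = 27. The call uses the interpolation helper's upstream name
# `interpolate`, as the surrounding code does.
def _demo_first_incorrect_term():
    cubes = [n ** 3 for n in range(1, 4)]  # [1, 8, 27]
    line = interpolate(cubes[:2])          # degree-1 fit through (1, 1), (2, 8)
    assert line(3) == 15                   # 7 * 3 - 6 = 15 != 27
    return line(3)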
| 16 | 1 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TapasConfig.from_json_file(_lowerCamelCase )
# set absolute/relative position embeddings parameter
_lowerCAmelCase : Optional[int] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_lowerCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
_lowerCAmelCase : Any = 4
_lowerCAmelCase : Optional[int] = True
# hparam_utils.py hparams
_lowerCAmelCase : Any = 0.664694
_lowerCAmelCase : str = 0.207951
_lowerCAmelCase : List[Any] = 0.121194
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : str = 0.0352513
_lowerCAmelCase : int = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_lowerCAmelCase : Tuple = 4
_lowerCAmelCase : Any = False
# hparam_utils.py hparams
_lowerCAmelCase : List[Any] = 36.4519
_lowerCAmelCase : List[Any] = 0.903421
_lowerCAmelCase : int = 222.088
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Tuple = 0.763141
_lowerCAmelCase : Optional[int] = TapasForQuestionAnswering(config=_lowerCamelCase )
elif task == "TABFACT":
_lowerCAmelCase : Optional[Any] = TapasForSequenceClassification(config=_lowerCamelCase )
elif task == "MLM":
_lowerCAmelCase : Union[str, Any] = TapasForMaskedLM(config=_lowerCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
_lowerCAmelCase : List[Any] = TapasModel(config=_lowerCamelCase )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(_lowerCamelCase )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
_lowerCAmelCase : List[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 )
tokenizer.save_pretrained(_lowerCamelCase )
print('Used relative position embeddings:' , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
    help="""Whether to use relative position embeddings or not. Defaults to False.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
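# Usage sketch (illustrative addition; paths and the script name are
# placeholders assumed from the upstream conversion scripts):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output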
| 16 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)  # CJK Unified Ideographs
        or (cp >= 0X3400 and cp <= 0X4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0X20000 and cp <= 0X2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0X2A700 and cp <= 0X2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0X2B740 and cp <= 0X2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0X2B820 and cp <= 0X2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0XF900 and cp <= 0XFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0X2F800 and cp <= 0X2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save the positions of Chinese sub-word tokens starting with ##, which means they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
                # record the position of this Chinese sub-token
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 1 |
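For reference, the ref file written above holds one JSON list per input line: the positions of BERT sub-tokens that continue an LTP-segmented Chinese word (the ones rewritten with a "##" prefix). A self-contained sketch of that format, with toy positions rather than values from a real tokenizer:

import json

ref_ids = [[2], [1, 3]]  # toy positions; real values come from the ref-preparation logic above
with open("ref_demo.txt", "w", encoding="utf-8") as f:
    f.writelines(json.dumps(ref) + "\n" for ref in ref_ids)
print(open("ref_demo.txt", encoding="utf-8").read())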
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=_A ,)
assert hasattr(self ,'env' )
def __lowerCamelCase ( self ,_A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-single""" ,instance_count=_A ,instance_type=self.instance_type ,debugger_hook_config=_A ,hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='py36' ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from the SageMaker job; this includes starting, preprocessing, and stopping
_lowerCAmelCase : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,_A )
| 16 |
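The test class above is gated behind the TEST_SAGEMAKER environment variable. A minimal sketch of how it might be triggered, assuming valid AWS credentials and a pytest layout like the transformers repository's (the test directory is an assumption):

import os
import subprocess

os.environ["TEST_SAGEMAKER"] = "True"  # literal_eval-compatible gate value
# hypothetical test location; adjust to wherever this test class lives
subprocess.run(["pytest", "-s", "tests/sagemaker/"], check=False)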
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 1 |
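A hedged usage sketch for the pipeline under test. The model id is the real CompVis checkpoint referenced in the slow tests above; the download is large, and the prompt and output filename are illustrative.

import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
image.save("squirrel.png")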
"""simple docstring"""
import numpy as np
def sigmoid( vector ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit( vector ):
    '''simple docstring'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
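A quick numeric check of the two activations defined above, with the math inlined so the snippet stands alone:

import numpy as np

v = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-v))
print(sig)      # [0.26894142 0.5        0.73105858]
print(v * sig)  # [-0.26894142  0.          0.73105858]  (SiLU / swish)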
"""simple docstring"""
import base64
def base85_encode( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8' ) )
def base85_decode( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 1 |
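Round-trip check of the Ascii85 helpers above, using the standard library directly so the snippet stands alone:

import base64

message = "Hello, world!"
encoded = base64.a85encode(message.encode("utf-8"))
assert base64.a85decode(encoded).decode("utf-8") == message
print(encoded)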
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 16 |
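The point of the _LazyModule indirection above: importing the package stays cheap, and the heavy torch-backed submodule loads only on first attribute access. A small sketch, assuming transformers and torch are installed:

import transformers  # fast: nothing model-related is imported yet

config = transformers.BigBirdPegasusConfig()  # first access triggers the lazy submodule load
print(config.model_type)  # bigbird_pegasus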
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 | 1 |
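A usage sketch for the fast tokenizer defined above. Sentence-pair encoding shows the [CLS]/[SEP] layout and the 0/1 token-type ids built by the two helper methods; the checkpoint is the standard uncased BERT.

from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("how are you", "fine thanks")
print(enc["input_ids"])
print(enc["token_type_ids"])  # 0s for the first segment and its [SEP], 1s after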
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowerCAmelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
if rng is None:
_lowerCAmelCase : List[Any] = random.Random()
_lowerCAmelCase : Dict = 1
for dim in shape:
total_dims *= dim
_lowerCAmelCase : Optional[Any] = []
for _ in range(_lowerCamelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_lowerCAmelCase : int = np.array(_lowerCamelCase , dtype=jnp.intaa ).reshape(_lowerCamelCase )
return output
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = ids_tensor(_lowerCamelCase , vocab_size=2 , rng=_lowerCamelCase )
# make sure that at least one token is attended to for each batch
_lowerCAmelCase : str = 1
return attn_mask
@require_flax
class __UpperCamelCase :
_UpperCAmelCase = None
_UpperCAmelCase = ()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Union[str, Any] = inputs['input_ids'].shape[-1] // 2
_lowerCAmelCase : Tuple = inputs['input_ids'][:max_batch_size, :sequence_length]
_lowerCAmelCase : int = jnp.ones_like(_A )
_lowerCAmelCase : List[Any] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_lowerCAmelCase : List[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_lowerCAmelCase : Union[str, Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self._get_input_ids_and_config()
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Dict = max_length
_lowerCAmelCase : Tuple = 0
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Dict = model_class(_A )
_lowerCAmelCase : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
_lowerCAmelCase : List[str] = getattr(_A ,_A )
_lowerCAmelCase : Union[str, Any] = pt_model_class(_A ).eval()
_lowerCAmelCase : Tuple = load_flax_weights_in_pytorch_model(_A ,flax_model.params )
_lowerCAmelCase : Union[str, Any] = flax_model.generate(_A ).sequences
_lowerCAmelCase : str = pt_model.generate(torch.tensor(_A ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_lowerCAmelCase : int = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(_A )
_lowerCAmelCase : Union[str, Any] = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Any = jit(model.generate )
_lowerCAmelCase : Dict = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = self._get_input_ids_and_config()
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Tuple = model_class(_A )
_lowerCAmelCase : Any = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Optional[Any] = jit(model.generate )
_lowerCAmelCase : Tuple = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self._get_input_ids_and_config()
_lowerCAmelCase : str = False
_lowerCAmelCase : List[str] = max_length
_lowerCAmelCase : int = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(_A )
_lowerCAmelCase : Tuple = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : List[str] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
_lowerCAmelCase : int = False
_lowerCAmelCase : Dict = max_length
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[str] = 2
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(_A )
_lowerCAmelCase : str = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Any = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : int = max_length
_lowerCAmelCase : str = 0.8
_lowerCAmelCase : List[Any] = 10
_lowerCAmelCase : Any = 0.3
_lowerCAmelCase : int = 1
_lowerCAmelCase : int = 8
_lowerCAmelCase : Tuple = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(_A )
_lowerCAmelCase : Any = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Any = jit(model.generate )
_lowerCAmelCase : Optional[int] = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
_lowerCAmelCase : Dict = max_length
_lowerCAmelCase : int = 1
_lowerCAmelCase : str = 8
_lowerCAmelCase : Optional[int] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : str = jit(model.generate )
_lowerCAmelCase : Union[str, Any] = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = self._get_input_ids_and_config()
_lowerCAmelCase : Optional[Any] = max_length
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[Any] = 8
_lowerCAmelCase : List[str] = 9
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[str] = model_class(_A )
_lowerCAmelCase : List[Any] = model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : str = jit(model.generate )
_lowerCAmelCase : str = jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : Union[str, Any] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[str] = model_class(_A )
_lowerCAmelCase : Optional[int] = model.generate(_A ,attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Optional[int] = jit(model.generate )
_lowerCAmelCase : Optional[Any] = jit_generate(_A ,attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : Dict = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : str = model_class(_A )
_lowerCAmelCase : Any = model.generate(_A ,attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Any = jit(model.generate )
_lowerCAmelCase : Dict = jit_generate(_A ,attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
_lowerCAmelCase : Optional[int] = attention_mask.at[(0, 0)].set(0 )
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Any = max_length
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = model_class(_A )
_lowerCAmelCase : Union[str, Any] = model.generate(_A ,attention_mask=_A ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_A )
_lowerCAmelCase : Any = jit(model.generate )
_lowerCAmelCase : Any = jit_generate(_A ,attention_mask=_A ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert' )
_lowerCAmelCase : List[Any] = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
_lowerCAmelCase : Any = 'Hello world'
_lowerCAmelCase : Tuple = tokenizer(_A ,return_tensors='np' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_A ,'do_samples' ):
model.generate(_A ,do_samples=_A )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_A ,'foo' ):
_lowerCAmelCase : List[Any] = {'foo': 'bar'}
model.generate(_A ,**_A )
| 16 |
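A hedged sketch of the jit-compiled generation path these tests exercise, using the same tiny test checkpoints the final test references:

from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
input_ids = tokenizer("Hello world", return_tensors="np").input_ids
jit_generate = jit(model.generate)  # same pattern as the tests above
sequences = jit_generate(input_ids).sequences
print(sequences.shape)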
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 1 |
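What pad_across_processes does, in miniature: each rank holds a tensor of a different length, and the utility right-pads (or left-pads, with pad_first=True) every tensor to the longest length across ranks so they can be gathered. A single-process sketch with illustrative shapes:

import torch

ranks = [torch.ones(2), torch.ones(3), torch.ones(4)]  # stand-ins for 3 processes
longest = max(t.shape[0] for t in ranks)
padded = [torch.nn.functional.pad(t, (0, longest - t.shape[0])) for t in ranks]
print([tuple(t.shape) for t in padded])  # all (4,)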
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 |
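The elimination-plus-back-substitution idea used by the solver above, shown on a 2x2 system worked independently here for clarity (this standalone sketch does not call the functions above): x + y = 3 and 2x - y = 0 give x = 1, y = 2.

matrix = [[1.0, 1.0], [2.0, -1.0]]
vector = [[3.0], [0.0]]
# forward elimination: zero out the sub-diagonal entry
factor = matrix[1][0] / matrix[0][0]
row1 = [matrix[1][i] - factor * matrix[0][i] for i in range(2)]
b1 = vector[1][0] - factor * vector[0][0]
# back substitution
y = b1 / row1[1]
x = (vector[0][0] - matrix[0][1] * y) / matrix[0][0]
print(x, y)  # 1.0 2.0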
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 1 |
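For reference, the first call above prints all 4! = 24 orderings of [3, 1, 2, 4]. A standard-library cross-check of the same idea:

from itertools import permutations

print(len(list(permutations([3, 1, 2, 4]))))      # 24
print(next(iter(permutations(["A", "B", "C"]))))  # ('A', 'B', 'C')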
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCAmelCase : Dict = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
_lowerCAmelCase : Dict = value
else:
_lowerCAmelCase : Any = value
return new_state_dict
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''
if is_panoptic:
_lowerCAmelCase : Optional[Any] = 'conditional_detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCAmelCase : Optional[int] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Tuple = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:256, :]
_lowerCAmelCase : List[Any] = in_proj_bias[:256]
_lowerCAmelCase : Dict = in_proj_weight[256:512, :]
_lowerCAmelCase : Union[str, Any] = in_proj_bias[256:512]
_lowerCAmelCase : Any = in_proj_weight[-256:, :]
_lowerCAmelCase : Any = in_proj_bias[-256:]
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    '''
    Copy/paste/tweak the original repository's weights into our Conditional DETR structure.
    '''
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = 'resnet101'
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    logger.info(f"""Converting model {model_name}...""")
    # load original model from torch hub
    conditional_detr = torch.hub.load('DeppMeng/ConditionalDETR', model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = 'conditional_detr.' + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'conditional_detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('conditional_detr')
                and not key.startswith('class_labels_classifier')
                and not key.startswith('bbox_predictor')
            ):
                val = state_dict.pop(key)
                state_dict['conditional_detr.model' + key[len('conditional_detr') :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['conditional_detr.' + key] = val
            elif key.startswith('bbox_attention') or key.startswith('mask_head'):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization='DepuMeng', commit_message='Add model')
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4)
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
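# Example invocation (script file name and output path are illustrative):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50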
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        '''Check whether this process should perform the log call.'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        '''
        Delegates the logger call after checking if we should log; `main_process_only`
        restricts the call to the main process, and `in_order` loops over every process
        in rank order.
        '''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    '''
    Returns a `MultiProcessAdapter`-wrapped logger; `log_level` falls back to the
    `ACCELERATE_LOG_LEVEL` environment variable when not given.
    '''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
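# A minimal usage sketch (assumes an `Accelerator()` or `PartialState()` has been
# created first, which initializes the shared state the adapter checks):
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, from the main process only")
#   logger.info("printed by every rank, rank by rank", main_process_only=False, in_order=True)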
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def generate_summaries_or_translations(examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs, ):
    '''Save model.generate results to <out_file>, and return how long it took.'''
    fout = Path(out_file).open('w', encoding='utf-8')
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '\n')
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    '''simple docstring'''
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def run_generate(verbose=True):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=None, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the beginning of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info', nargs='?', type=str, const=datetime_now(), help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""")
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores['info'] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w'))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
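# Usage for summarization (paths are illustrative, flags as defined above):
# python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization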
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs, ):
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4, ):
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans, ):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
"""simple docstring"""
def hamming_distance(string1, string2):
    '''Return the number of positions at which two equal-length strings differ.'''
    if len(string1) != len(string2):
        raise ValueError('String lengths must match!')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
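# Quick sanity checks (the classic textbook pair "karolin"/"kathrin" differs in
# exactly three positions):
#
#   assert hamming_distance('python', 'python') == 0
#   assert hamming_distance('karolin', 'kathrin') == 3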
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type='fourier', mid_block_type='UNetMidBlock1D', down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'), up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'), )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        '''simple docstring'''
        return super().test_save_load_local()
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        '''simple docstring'''
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
    @skip_mps
    def test_save_load_optional_components(self):
        '''simple docstring'''
        return super().test_save_load_optional_components()
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        return super().test_attention_slicing_forward_pass()
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        '''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    def test_dance_diffusion_fp16(self):
        '''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k', torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="bottleneck", hidden_act="relu", downsample_in_first_stage=False, out_features=None, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F"""stage{idx}""" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])
    @property
    def atol_for_validation(self):
        '''simple docstring'''
        return 1e-3
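# A minimal usage sketch of the config defined above (argument values are the defaults):
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage4"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']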
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        '''simple docstring'''
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        '''simple docstring'''
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    sample = self.full_loop(
                        solver_order=order, solver_type=solver_type, prediction_type=prediction_type, )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        '''simple docstring'''
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file )
_lowerCAmelCase : Tuple = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='mecab' )
self.assertIsNotNone(_A )
_lowerCAmelCase : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname ,'tokenizer.bin' )
with open(_A ,'wb' ) as handle:
pickle.dump(_A ,_A )
with open(_A ,'rb' ) as handle:
_lowerCAmelCase : Optional[int] = pickle.load(_A )
_lowerCAmelCase : Optional[int] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
_lowerCAmelCase : Any = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
_lowerCAmelCase : int = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = MecabTokenizer(do_lower_case=_A ,mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
_lowerCAmelCase : List[str] = MecabTokenizer(
do_lower_case=_A ,normalize_text=_A ,mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = MecabTokenizer(normalize_text=_A ,mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] ,)
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='sudachi' )
self.assertIsNotNone(_A )
_lowerCAmelCase : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
_lowerCAmelCase : str = tokenizer.tokenize(_A )
self.assertListEqual(_A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,'tokenizer.bin' )
with open(_A ,'wb' ) as handle:
pickle.dump(_A ,_A )
with open(_A ,'rb' ) as handle:
_lowerCAmelCase : Any = pickle.load(_A )
_lowerCAmelCase : Tuple = tokenizer_new.tokenize(_A )
self.assertListEqual(_A ,_A )
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] ,)
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国', '人', '参政', '権'] )
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国人', '参政権'] )
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = SudachiTokenizer(sudachi_dict_type='core' ,sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) ,['外国人参政権'] )
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SudachiTokenizer(do_lower_case=_A ,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] ,)
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = SudachiTokenizer(normalize_text=_A ,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,[' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] ,)
@require_sudachi
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = SudachiTokenizer(trim_whitespace=_A ,sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] ,)
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_A )
_lowerCAmelCase : Dict = 'こんにちは、世界。\nこんばんは、世界。'
_lowerCAmelCase : List[Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A ,['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname ,'tokenizer.bin' )
with open(_A ,'wb' ) as handle:
pickle.dump(_A ,_A )
with open(_A ,'rb' ) as handle:
_lowerCAmelCase : str = pickle.load(_A )
_lowerCAmelCase : List[str] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A ,_A )
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = JumanppTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = JumanppTokenizer(normalize_text=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] ,)
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = JumanppTokenizer(trim_whitespace=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) ,['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] ,)
@require_jumanpp
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) ,['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
_lowerCAmelCase : Any = {}
for i, token in enumerate(_A ):
_lowerCAmelCase : int = i
_lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=_A ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) ,['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) ,['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) ,['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
_lowerCAmelCase : int = tokenizer.subword_tokenizer
_lowerCAmelCase : Optional[Any] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_A ,['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
_lowerCAmelCase : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_A ,['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
_lowerCAmelCase : List[str] = tokenizer.encode('ありがとう。' ,add_special_tokens=_A )
_lowerCAmelCase : Optional[Any] = tokenizer.encode('どういたしまして。' ,add_special_tokens=_A )
_lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
_lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_A ,_A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='character', **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
        return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type='character' )
_lowerCAmelCase : Any = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_A ,['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
_lowerCAmelCase : Tuple = {}
for i, token in enumerate(_A ):
_lowerCAmelCase : int = i
_lowerCAmelCase : Dict = CharacterTokenizer(vocab=_A ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) ,['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) ,['こ', 'ん', 'に', 'ち', '[UNK]'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
_lowerCAmelCase : List[str] = tokenizer.encode('ありがとう。' ,add_special_tokens=_A )
_lowerCAmelCase : Dict = tokenizer.encode('どういたしまして。' ,add_special_tokens=_A )
_lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(_A )
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(_A ,_A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cl-tohoku/bert-base-japanese'
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A ,_A )
class BertTokenizerMismatchTest(unittest.TestCase):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' ,level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_A )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
_lowerCAmelCase : Tuple = 'bert-base-cased'
with self.assertLogs('transformers' ,level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_A )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    '''simple docstring'''
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')
    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')
    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
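# For instance (illustrative key), a deeply nested Conv1d weight is renamed:
#   replace_key('encoders.0.level_blocks.0.model.3.model.1.model.1.weight')
#   -> 'encoders.0.level_blocks.0.model.3.model.1.conv1d_1.weight'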
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
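    # The regexes above capture block and layer indices from raw checkpoint keys; e.g.
    # re_encoder_block_conv_in matches a key shaped like "encoders.0.level_blocks.1.model.2.3.weight"
    # (an illustrative key, not one read from a real checkpoint).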
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 1 |
"""simple docstring"""
from math import ceil
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = list(range(0 , _lowerCamelCase ) )
_lowerCAmelCase : List[Any] = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
_lowerCAmelCase : Tuple = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCamelCase )
    # Missing and extra blocks
_lowerCAmelCase : Optional[int] = [i for i in blocks if i not in device_map_blocks]
_lowerCAmelCase : int = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCamelCase ) != 0:
raise ValueError(
            'Duplicate attention blocks specified in device_map. Each attention block must be assigned to'
            ' exactly one device. These attention blocks were specified more than once: ' + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
'There are attention blocks for this model that are not specified in the device_map. Add these attention '
'blocks to a device on the device_map: ' + str(_lowerCamelCase ) )
if len(_lowerCamelCase ) != 0:
raise ValueError(
'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
+ str(_lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) )
_lowerCAmelCase : Dict = int(ceil(n_layers / len(_lowerCamelCase ) ) )
_lowerCAmelCase : List[str] = [layers[i : i + n_blocks] for i in range(0 , _lowerCamelCase , _lowerCamelCase )]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
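# A minimal, self-contained sketch of the even split implemented above; the name
# `naive_device_map` is illustrative and not part of this file:
def naive_device_map(n_layers: int, devices: list) -> dict:
    """Assign `n_layers` block indices evenly across `devices`."""
    layers = list(range(n_layers))
    per_device = int(ceil(n_layers / len(devices)))  # `ceil` is imported at the top of this file
    return {device: layers[i : i + per_device] for device, i in zip(devices, range(0, n_layers, per_device))}
# e.g. naive_device_map(6, [0, 1]) -> {0: [0, 1, 2], 1: [3, 4, 5]}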
| 16 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 1 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : str = 250
_lowerCAmelCase : str = ids_tensor((batch_size, length) ,_A )
_lowerCAmelCase : Optional[int] = torch.ones((batch_size, length) ,device=_A ,dtype=torch.float ) / length
return input_ids, scores
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Any = self._get_tensors(5 )
_lowerCAmelCase : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : Dict = self._get_tensors(9 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : int = self._get_tensors(10 )
self.assertTrue(criteria(_A ,_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = MaxLengthCriteria(max_length=10 )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : Any = self._get_tensors(9 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : int = self._get_tensors(10 )
self.assertTrue(criteria(_A ,_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 ,max_new_tokens=5 )
_lowerCAmelCase, _lowerCAmelCase : str = self._get_tensors(5 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase, _lowerCAmelCase : Dict = self._get_tensors(10 )
self.assertTrue(criteria(_A ,_A ) )
_lowerCAmelCase : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length ,10 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = self._get_tensors(5 )
_lowerCAmelCase : int = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_A ,_A ) )
_lowerCAmelCase : Union[str, Any] = MaxTimeCriteria(max_time=0.1 ,initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_A ,_A ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) ,10 )
with self.assertWarns(_A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) ,11 )
_lowerCAmelCase : List[Any] = validate_stopping_criteria(StoppingCriteriaList() ,11 )
self.assertEqual(len(_A ) ,1 )
| 16 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
        _lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
            _lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(c )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                _lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
                new_pieces.append(piece )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
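    # Illustrative detokenization: ["▁Hello", "▁world"] -> "Hello world"
    # (SPIECE_UNDERLINE is the "▁" word-start marker that SentencePiece emits).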
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
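    # Resulting XLNet layout (token strings shown for illustration): a single
    # sequence becomes "A </s> <cls>"; a pair becomes "A </s> B </s> <cls>".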
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 | 1 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_lowerCAmelCase = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(4_2)
_lowerCAmelCase = """sshleifer/student_marian_en_ro_6_1"""
_lowerCAmelCase = """sshleifer/tiny-mbart"""
@require_torch
class __UpperCamelCase ( a__ ):
def __lowerCamelCase ( self ,_A=False ,_A=None ,_A=True ,_A=True ,_A=True ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.run_trainer(
eval_steps=1 ,max_len=12 ,model_name=_A ,num_train_epochs=1 ,distributed=_A ,extra_args_str=_A ,predict_with_generate=_A ,do_train=_A ,do_eval=_A ,do_predict=_A ,)
_lowerCAmelCase : Optional[Any] = TrainerState.load_from_json(os.path.join(_A ,'trainer_state.json' ) ).log_history
if not do_eval:
return
_lowerCAmelCase : Tuple = [log for log in logs if 'eval_loss' in log.keys()]
_lowerCAmelCase : Union[str, Any] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_lowerCAmelCase : List[str] = eval_metrics[-1]
        assert isinstance(last_step_stats['eval_bleu'] ,float )
assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A )
@require_torch_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp simple' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp simple --fp16' )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A ,extra_args_str='--sharded_ddp zero_dp_2' ,predict_with_generate=_A )
@unittest.skip('Requires an update of the env running those tests' )
@require_torch_multi_gpu
@require_fairscale
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=_A ,extra_args_str='--sharded_ddp zero_dp_2 --fp16' ,predict_with_generate=_A )
@require_apex
@require_torch_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=_A ,extra_args_str='--fp16 --fp16_backend=apex' )
        # test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_A ,extra_args_str='--fp16 --fp16_backend=apex' )
@parameterized.expand(['base', 'low', 'high', 'mixed'] )
@require_torch_multi_gpu
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
# test with the default log_level - should be info and thus log info once
'base': {'extra_args_str': '', 'n_matches': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
}
_lowerCAmelCase : int = experiments[experiment_id]
_lowerCAmelCase : List[Any] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
_lowerCAmelCase : Tuple = 'Running training'
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_A ,extra_args_str=data['extra_args_str'] )
_lowerCAmelCase : int = len(re.findall(_A ,cl.err ) )
self.assertEqual(_A ,data['n_matches'] )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.run_trainer(
eval_steps=2 ,max_len=128 ,model_name=_A ,learning_rate=3E-4 ,num_train_epochs=10 ,distributed=_A ,)
# Check metrics
_lowerCAmelCase : Tuple = TrainerState.load_from_json(os.path.join(_A ,'trainer_state.json' ) ).log_history
_lowerCAmelCase : Union[str, Any] = [log for log in logs if 'eval_loss' in log.keys()]
_lowerCAmelCase : int = eval_metrics[0]
_lowerCAmelCase : List[str] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] ,float )
# test if do_predict saves generations and metrics
_lowerCAmelCase : str = os.listdir(_A )
        _lowerCAmelCase : Dict = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __lowerCamelCase ( self ):
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_A ) -> Tuple[int, float]:
_lowerCAmelCase : List[str] = '--skip_memory_metrics 0'
_lowerCAmelCase : str = self.run_trainer(
max_len=128 ,model_name=_A ,learning_rate=3E-4 ,num_train_epochs=1 ,optim=_A ,distributed=_A ,extra_args_str=_A ,do_eval=_A ,do_predict=_A ,n_gpus_to_use=1 ,)
# Check metrics
_lowerCAmelCase : str = TrainerState.load_from_json(Path(_A ,'trainer_state.json' ) ).log_history
_lowerCAmelCase : List[Any] = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
_lowerCAmelCase : List[str] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
_lowerCAmelCase : Optional[Any] = logs[0]['train_loss']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_lowerCAmelCase : Union[str, Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_lowerCAmelCase : List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
_lowerCAmelCase : List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_lowerCAmelCase : List[str] = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which sit in `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
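        # Back-of-envelope check of the figures above (illustrative only):
        # 25e6 params * (8 - 2) bytes per param / 2**20 bytes per MB ~= 143MB saved.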
_lowerCAmelCase : Any = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
            _A ,_A ,'should use ~150MB less allocated gpu memory with BNB than without it for this model, but got'
F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" ,)
self.assertGreater(
            _A ,_A ,'should use ~150MB less total gpu memory with BNB than without it for this model, but got'
F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" ,)
self.assertEqual(
_A ,_A ,F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = 3E-3 ,_A = "adafactor" ,_A = False ,_A = None ,_A = 0 ,_A = True ,_A = True ,_A = True ,_A = True ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
_lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_lowerCAmelCase : Tuple = F"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(_A )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(_A )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
_lowerCAmelCase : Dict = F"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(_A )}
""".split()
_lowerCAmelCase : List[str] = '\n --do_predict\n '.split()
_lowerCAmelCase : int = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_lowerCAmelCase : List[Any] = get_gpu_count()
_lowerCAmelCase : List[str] = get_torch_dist_unique_port()
_lowerCAmelCase : Tuple = F"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
_lowerCAmelCase : Optional[int] = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_A ,env=self.get_env() )
else:
_lowerCAmelCase : Optional[Any] = ['run_translation.py'] + args
with patch.object(_A ,'argv' ,_A ):
main()
return output_dir
| 16 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
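        # Illustrative padding math: an 11-byte message gets 0x80, then 44 zero
        # bytes, then its 8-byte big-endian bit length, i.e. exactly one 64-byte block.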
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
            # Convert the given block into a list of sixteen big-endian 4-byte integers
            _lowerCAmelCase : int = list(struct.unpack('>16L' ,block ) )
            # extend the message schedule with 48 zeroed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        _lowerCAmelCase : List[str] = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 16 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = BertConfig.from_json_file(_lowerCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
_lowerCAmelCase : List[Any] = BertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 16 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)  # default to the identity key
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
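    # Array-heap index arithmetic used by the helpers above: node i has parent
    # (i - 1) // 2 and children 2*i + 1 and 2*i + 2; e.g. node 3 -> parent 1, children 7 and 8.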
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
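# The class above implements an updatable min-heap ordered by a key function. A
# standalone sketch of the same ordering idea with the stdlib `heapq` (the names
# below are illustrative, not from this file):
import heapq

def smallest_by_key(items, key):
    # Pair each item with its score so heapq orders entries by the score.
    scored = [(key(item), item) for item in items]
    heapq.heapify(scored)
    return scored[0][1]

# e.g. smallest_by_key(["aaa", "b", "cc"], key=len) -> "b"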
| 16 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
            # Convert the given block into a list of sixteen big-endian 4-byte integers
            _lowerCAmelCase : int = list(struct.unpack('>16L' ,block ) )
            # extend the message schedule with 48 zeroed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        _lowerCAmelCase : List[str] = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
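        # e.g. ror(0x0000_0001, 1) -> 0x8000_0000 (illustrative values): a 32-bit
        # right-rotation whose mask keeps the result within 32 bits.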
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 16 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
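        # For e.g. 3 positions the registered mask is [[0, -1e4, -1e4], [0, 0, -1e4],
        # [0, 0, 0]]: each position attends only to itself and to earlier positions.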
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
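# Note: the post-processing above de-standardizes prior latents back to CLIP
# image-embedding space (embeddings = latents * clip_std + clip_mean), inverting
# the normalization applied to the CLIP targets when the prior was trained.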
| 16 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=_A ,)
assert hasattr(self ,'env' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = {
'enabled': True,
'processes_per_host': 8,
}
_lowerCAmelCase : Dict = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowerCAmelCase : List[str] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowerCAmelCase : Dict = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=_A ,instance_type=self.instance_type ,debugger_hook_config=_A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=_A ,py_version='py36' ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase : str = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,_A )
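# Note on the distribution config above: SageMaker model parallelism is enabled
# with 4 pipeline partitions and 4 microbatches per batch on an "interleaved"
# schedule, with DDP data parallelism layered on top across the 8 MPI processes
# launched per host.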
| 16 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
        # use a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _lowerCAmelCase : List[str] = {'dtype': jnp.int64}
            else:
                _lowerCAmelCase : Tuple = {'dtype': jnp.int32}
        elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            _lowerCAmelCase : Any = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
        # use a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
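# Usage sketch (illustrative, not part of this module): this formatter is what
# backs `Dataset.with_format("jax")` in the `datasets` library, e.g.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # returned as a jax.numpy array on the requested device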
| 16 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,'schedulers/' ) )
_lowerCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(A__ ,'src/diffusers/schedulers/scheduling_ddpm.py' ) ,os.path.join(self.diffusers_dir ,'schedulers/scheduling_ddpm.py' ) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'src/diffusers'
shutil.rmtree(self.diffusers_dir )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_lowerCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _lowerCAmelCase : Optional[int] = black.Mode(target_versions={black.TargetVersion.PY37} ,line_length=119 )
_lowerCAmelCase : List[str] = black.format_str(A__ ,mode=A__ )
_lowerCAmelCase : int = os.path.join(self.diffusers_dir ,'new_code.py' )
with open(A__ ,'w' ,newline='\n' ) as f:
f.write(A__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=A__ )
with open(A__ ,'r' ) as f:
self.assertTrue(f.read() ,A__ )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' )
self.assertEqual(A__ ,A__ )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,REFERENCE_CODE + '\n' ,)
# With no empty line at the end
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,A__ ,)
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,re.sub('DDPM' ,'Test' ,A__ ) ,)
# Copy consistency with a really long name
_lowerCAmelCase : str = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" ,F"""{long_class_name}SchedulerOutput""" ,re.sub('Bert' ,A__ ,A__ ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,A__ ,overwrite_result=re.sub('DDPM' ,'Test' ,A__ ) ,)
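# Note: these tests exercise the "# Copied from" convention: a block marked as
# copied must stay identical to its source (after any `with A->B` renames), and
# `is_copy_consistent(..., overwrite=True)` rewrites the file to restore parity
# instead of only reporting the mismatch.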
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
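# Note: the static method above is spherical linear interpolation (slerp): with
# theta the angle between the flattened tensors, it returns
#   sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# which interpolates between two noise tensors along the great circle instead of
# a straight line, keeping the interpolants at a comparable norm.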
| 16 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.load(__A , map_location='cpu' )
_lowerCAmelCase : List[Any] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
_lowerCAmelCase : int = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_lowerCAmelCase : Optional[int] = v
else:
_lowerCAmelCase : List[Any] = v
_lowerCAmelCase : Tuple = chkpt['params']
_lowerCAmelCase : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__A , (torch.FloatTensor, numpy.ndarray) )}
_lowerCAmelCase : List[str] = chkpt['dico_word2id']
_lowerCAmelCase : List[Any] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
_lowerCAmelCase : int = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCAmelCase : Any = pytorch_dump_folder_path + '/' + CONFIG_NAME
_lowerCAmelCase : Optional[Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(__A , __A )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , indent=2 ) + '\n' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(__A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__A , indent=2 ) + '\n' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
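# Note: the script above splits an original XLM checkpoint into the three
# artifacts a `transformers` model directory expects: the weights (WEIGHTS_NAME),
# the JSON config (CONFIG_NAME), and the tokenizer vocabulary. Tensor-valued
# entries are dropped from the params dict, and the vocab remaps "@@"
# continuation markers, appending "</w>" to word-final tokens.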
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
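# Illustrative round trip (hedged: assumes this is the Speech2Text2 tokenizer
# and that real vocab/merges files are available):
#
#   tok = Speech2Text2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   pieces = tok._tokenize("hello world")   # BPE pieces, continuations marked "@@ "
#   tok.convert_tokens_to_string(pieces)    # re-joins the "@@ "-separated pieces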
| 16 | 0 |
import argparse
import os
import re
import packaging.version
_lowerCAmelCase = 'examples/'
_lowerCAmelCase = {
'examples': (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), 'release = "VERSION"\n'),
}
_lowerCAmelCase = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
_lowerCAmelCase = 'README.md'
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : Tuple = f.read()
_lowerCAmelCase, _lowerCAmelCase : Tuple = REPLACE_PATTERNS[pattern]
_lowerCAmelCase : Tuple = replace.replace('VERSION' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase : str = re_pattern.sub(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for folder, directories, fnames in os.walk(_SCREAMING_SNAKE_CASE ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , pattern='examples' )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not patch:
update_version_in_examples(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = '🤗 Transformers currently provides the following architectures'
_lowerCAmelCase : List[str] = '1. Want to contribute a new model?'
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase : int = f.readlines()
# Find the start of the list.
_lowerCAmelCase : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_lowerCAmelCase : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
_lowerCAmelCase : Union[str, Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
_lowerCAmelCase : Tuple = f.read()
_lowerCAmelCase : Tuple = REPLACE_PATTERNS['init'][0].search(_SCREAMING_SNAKE_CASE ).groups()[0]
return packaging.version.parse(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
_lowerCAmelCase : str = default_version.base_version
elif patch:
_lowerCAmelCase : Dict = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_lowerCAmelCase : Tuple = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_lowerCAmelCase : int = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_SCREAMING_SNAKE_CASE ) == 0:
_lowerCAmelCase : Dict = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_SCREAMING_SNAKE_CASE , patch=_SCREAMING_SNAKE_CASE )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = get_version()
_lowerCAmelCase : str = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_lowerCAmelCase : List[str] = current_version.base_version
# Check with the user we got that right.
_lowerCAmelCase : str = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_SCREAMING_SNAKE_CASE ) == 0:
_lowerCAmelCase : Dict = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_SCREAMING_SNAKE_CASE )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
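# Usage sketch: running this script with no flags performs the pre-release bump,
# rewriting the version string in setup.py, src/transformers/__init__.py and
# each example's `check_min_version(...)` call; `--post_release` moves the
# branch back to the next ".dev0" dev version, and `--patch` bumps only the
# micro version and skips the example updates.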
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Tuple = self.act(self.wi_0(_A ) )
        _lowerCAmelCase : Optional[int] = self.wi_1(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Optional[int] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
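# Note: the layer above implements FiLM (feature-wise linear modulation): one
# linear projection of the conditioning embedding is chunked into (scale, shift)
# and applied as x * (1 + scale) + shift, so a zero-initialized projection
# leaves the input unchanged.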
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
'''simple docstring'''
if hi < 0:
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
while lo < hi:
_lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_lowerCAmelCase : Optional[int] = mid + 1
else:
_lowerCAmelCase : Dict = mid
return lo
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
'''simple docstring'''
if hi < 0:
_lowerCAmelCase : int = len(_lowerCamelCase )
while lo < hi:
_lowerCAmelCase : List[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_lowerCAmelCase : Tuple = mid + 1
else:
_lowerCAmelCase : Union[str, Any] = mid
return lo
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_left(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 0 , _lowerCamelCase = -1 ):
'''simple docstring'''
sorted_collection.insert(bisect_right(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Tuple = len(_lowerCamelCase ) - 1
while left <= right:
_lowerCAmelCase : str = left + (right - left) // 2
_lowerCAmelCase : Dict = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_lowerCAmelCase : int = midpoint - 1
else:
_lowerCAmelCase : Union[str, Any] = midpoint + 1
return None
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = bisect.bisect_left(_lowerCamelCase , _lowerCamelCase )
if index != len(_lowerCamelCase ) and sorted_collection[index] == item:
return index
return None
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if right < left:
return None
_lowerCAmelCase : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , midpoint + 1 , _lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
_lowerCAmelCase = sorted(int(item) for item in user_input.split(""","""))
_lowerCAmelCase = int(input("""Enter a single number to be found in the list:\n"""))
_lowerCAmelCase = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
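# Quick sanity check of the invariants above: bisect_left returns the first
# insertion index that keeps the list sorted, bisect_right the last one, e.g.
#
#   assert bisect_left([1, 2, 2, 3], 2) == 1
#   assert bisect_right([1, 2, 2, 3], 2) == 3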
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
_lowerCAmelCase : Tuple = DisjunctiveConstraint(UpperCamelCase_ )
self.assertTrue(isinstance(dc.token_ids ,UpperCamelCase_ ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(UpperCamelCase_ ) # fails here
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [[1, 2, 3], [1, 2, 4]]
_lowerCAmelCase : Optional[int] = DisjunctiveConstraint(UpperCamelCase_ )
_lowerCAmelCase : Any = dc.update(1 )
_lowerCAmelCase : Dict = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : Dict = dc.update(2 )
_lowerCAmelCase : int = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : Optional[int] = dc.update(3 )
_lowerCAmelCase : str = stepped is True and completed is True and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowerCAmelCase : Union[str, Any] = DisjunctiveConstraint(UpperCamelCase_ )
_lowerCAmelCase : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : List[str] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_lowerCAmelCase : List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_lowerCAmelCase : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
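# Note: a disjunctive constraint is fulfilled once *any one* of its candidate
# token sequences has been generated; update(token_id) advances the trie match
# and reset() clears progress. Nested candidates (e.g. [1, 2] inside
# [1, 2, 3, 4]) are rejected up front, since completing the shorter sequence
# would leave the longer one unreachable.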
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
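# Worked micro-example (added for clarity): the eliminator above solves
#     1*x + 1*y = 3
#     1*x + 2*y = 5
# giving x = 1, y = 2.
if __name__ == "__main__":
    assert solve([[1, 1], [1, 2]], [[3], [5]]) == [[1.0], [2.0]]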
def interpolate(data_points: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )
    return interpolated_func
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
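# Background note (added): for the cubic u(n) = n**3, Project Euler 101's worked
# example gives optimum polynomials whose first incorrect terms are 1, 15 and 58
# (sum 74); `solution()` repeats that procedure for the degree-10 polynomial above.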
| 16 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __UpperCamelCase ( PretrainedConfig ):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self ,state_dim=17 ,act_dim=4 ,hidden_size=128 ,max_ep_len=4096 ,action_tanh=True ,vocab_size=1 ,n_positions=1024 ,n_layer=3 ,n_head=1 ,n_inner=None ,activation_function="relu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,scale_attn_by_inverse_layer_idx=False ,reorder_and_upcast_attn=False ,**kwargs ,):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
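# Usage sketch (added; illustrative only). Upstream this class is
# `DecisionTransformerConfig`; with the defaults above, e.g.:
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     assert config.hidden_size == 128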
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
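# Worked example (added for clarity): with chinese_word_set = {"中国"} the
# WordPiece pieces ["中", "国", "人"] become ["中", "##国", "人"], marking the
# second character as the continuation of a whole word for whole-word masking.
if __name__ == "__main__":
    assert add_sub_symbol(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]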
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
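# Output format note (added): each line of `save_path` holds a JSON list with the
# positions of "##"-continuation subwords that belong to a whole Chinese word,
# e.g. `[2, 5, 6]` (illustrative values); the whole-word-masking training script
# typically consumes this file as its reference file.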
| 16 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type , is_finetuned ):
    '''simple docstring'''
    for attribute in key.split('.' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    '''simple docstring'''
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , 'vocab.json' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path , 'w' , encoding='utf-8' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_unispeech , is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
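# Example invocation (added; the script name and all paths are placeholders):
#
#     python convert_unispeech_checkpoint.py \
#         --checkpoint_path /path/to/unispeech.pt \
#         --pytorch_dump_folder_path ./unispeech-converted \
#         --config_path ./config.json \
#         --dict_path ./dict.ltr.txt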
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self ):
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
        scheduler = DDIMScheduler(
            beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='scaled_linear' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
    def get_dummy_inputs(self ,device ,seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_inference_text2img(self ):
'''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self ,device ,dtype=torch.float16 ,seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device ,dtype=dtype )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim(self ):
'''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
        max_diff = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self ,device ,dtype=torch.float16 ,seed=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device ,dtype=dtype )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_ldm_default_ddim(self ):
'''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
        max_diff = np.abs(expected_image - image ).max()
        assert max_diff < 1E-3
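# Minimal usage sketch (added; mirrors the checkpoint exercised above and assumes
# a CUDA device plus enough memory is available):
if __name__ == "__main__":
    pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to("cuda")
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=50).images[0]
    image.save("squirrel.png")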
| 16 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens = []
    suffix_tokens = []
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,src_lang=None ,tgt_lang=None ,additional_special_tokens=None ,**kwargs ,):
        '''simple docstring'''
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            vocab_file=vocab_file ,tokenizer_file=tokenizer_file ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,src_lang=src_lang ,tgt_lang=tgt_lang ,additional_special_tokens=additional_special_tokens ,**kwargs ,)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang(self ,new_src_lang ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens(self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs(self ,raw_inputs ,return_tensors ,src_lang ,tgt_lang ,**extra_kwargs ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs ,add_special_tokens=True ,return_tensors=return_tensors ,**extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seqaseq_batch(self ,src_texts ,src_lang = "en_XX" ,tgt_texts = None ,tgt_lang = "ro_RO" ,**kwargs ,) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts ,tgt_texts ,**kwargs )
    def _switch_to_input_mode(self ):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self ,src_lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def set_tgt_lang_special_tokens(self ,lang ):
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
    def save_vocabulary(self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        return (out_vocab_file,)
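# Usage sketch (added; illustrative only). Upstream this class is
# `MBartTokenizerFast`; preparing a translation batch with the language codes
# defined above looks like:
#
#     tok = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # the suffix tokens are </s> followed by the source language code en_XX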
| 707 |
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8' ) )
def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
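# Round-trip sanity check (added), matching the Ascii85 helpers above:
if __name__ == "__main__":
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"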
| 16 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("""fixtures""")
class __UpperCamelCase ( unittest.TestCase ):
    def test_cached_feature_extractor_has_minimum_calls_to_head(self ):
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' ,return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
    def setUpClass(cls ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass(cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
    def test_push_to_hub(self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained(_A )
feature_extractor.push_to_hub('test-feature-extractor' ,use_auth_token=self._token )
_lowerCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A ,getattr(_A ,_A ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_A ,repo_id='test-feature-extractor' ,push_to_hub=_A ,use_auth_token=self._token )
_lowerCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A ,getattr(_A ,_A ) )
    def test_push_to_hub_in_organization(self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(_A )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' ,use_auth_token=self._token )
_lowerCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A ,getattr(_A ,_A ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_A ,repo_id='valid_org/test-feature-extractor-org' ,push_to_hub=_A ,use_auth_token=self._token )
_lowerCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A ,getattr(_A ,_A ) )
    def test_push_to_hub_dynamic_feature_extractor(self ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        feature_extractor.push_to_hub('test-dynamic-feature-extractor' ,use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map ,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} ,)
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            F"""{USER}/test-dynamic-feature-extractor""" ,trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ ,'CustomFeatureExtractor' )
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
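# Usage sketch (added; illustrative only). Upstream this class is
# `BertTokenizerFast`; it loads from the checkpoints mapped above, e.g.:
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     print(tok("Hello world!").input_ids)  # [101, 7592, 2088, 999, 102]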
| 16 | 0 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE_NAME = 'pytorch_model.bin'
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Number of self-training iterations to run."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , id2label , next_data_dir ):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('probability' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(['label', 'probability'] )
    dataset = dataset.rename_column('prediction' , 'label' )
    dataset = dataset.map(lambda example : {"label": id2label[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f"""train_pseudo.{args.data_file_extension}""" )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain(model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
'''simple docstring'''
_lowerCAmelCase : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
# Sanity checks
    data_files = {}
    args.data_file_extension = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
        data_files['eval'] = args.eval_file
for key in data_files:
        extension = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
            args.data_file_extension = extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
    data_dir_format = f"""{args.output_dir}/self-train_iter-{{}}""".format
    data_dir = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
            os.makedirs(data_dir , exist_ok=True )
accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        data_dir = data_dir_format(iteration )
        assert os.path.exists(data_dir )
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir , 'stage-1' )
        arguments_dict = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key ):
arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir , 'best-checkpoint' , MODEL_BIN_FILE_NAME )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , model_bin_file_path , iteration , )
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info('Self-training job completed: iteration: %d, stage: 1.' , iteration )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , 'best-checkpoint' )
            current_output_dir = os.path.join(data_dir , 'stage-2' )
            # Update arguments_dict
            arguments_dict['model_name_or_path'] = model_path
            arguments_dict['train_file'] = data_files['train']
            arguments_dict['output_dir'] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , 'best-checkpoint' , MODEL_BIN_FILE_NAME )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , model_bin_file_path , iteration , )
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info('Self-training job completed: iteration: %d, stage: 2.' , iteration )
_lowerCAmelCase : int = iteration
_lowerCAmelCase : Dict = data_dir_format(iteration + 1 )
_lowerCAmelCase : int = AutoConfig.from_pretrained(os.path.join(__lowerCAmelCase , 'best-checkpoint' ) )
_lowerCAmelCase : List[Any] = config.idalabel
_lowerCAmelCase : Dict = os.path.join(__lowerCAmelCase , 'eval_results_best-checkpoint.json' )
_lowerCAmelCase : str = os.path.join(__lowerCAmelCase , 'test_results_best-checkpoint.json' )
assert os.path.exists(__lowerCAmelCase )
with open(__lowerCAmelCase , 'r' ) as f:
_lowerCAmelCase : List[str] = float(json.load(__lowerCAmelCase )[args.eval_metric] )
_lowerCAmelCase : int = os.path.join(__lowerCAmelCase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(__lowerCAmelCase )
# Loading the dataset from local csv or json files.
_lowerCAmelCase : int = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
_lowerCAmelCase : Optional[int] = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__lowerCAmelCase ):
shutil.copy(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCAmelCase : Tuple = eval_result
if best_iteration is None:
_lowerCAmelCase : Any = new_iteration
_lowerCAmelCase : Any = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCAmelCase : Tuple = new_iteration
_lowerCAmelCase : List[str] = new_eval_result
_lowerCAmelCase : List[Any] = 0
else:
if new_eval_result == best_eval_result:
_lowerCAmelCase : Optional[Any] = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCAmelCase : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , __lowerCAmelCase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__lowerCAmelCase , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , __lowerCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
                os.path.join(__lowerCAmelCase , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(__lowerCAmelCase , 'eval_results_best-iteration.json' ) , )
| 709 |
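The loop above keeps the best metric seen so far and a patience counter that resets on improvement. A simplified, self-contained sketch of that early-stopping rule (the scores below are made up, and the equal-score branch is folded into the no-improvement case):

best_result = None
patience_counter = 0
patience, threshold = 2, 0.0  # stand-ins for args.early_stopping_patience / _threshold
for iteration, eval_result in enumerate([0.71, 0.74, 0.74, 0.73]):  # hypothetical metrics
    if best_result is None or eval_result - best_result > threshold:
        best_result = eval_result
        patience_counter = 0          # improvement: reset patience
    else:
        patience_counter += 1         # no improvement beyond the threshold
    if patience_counter >= patience:
        print(f"stopping early after iteration {iteration}")
        break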
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 0 |
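For intuition, the assertions in the `__main__` block above check the contract of `pad_across_processes`: every rank's tensor is zero-padded along dim 0 up to the global maximum, before or after the data depending on `pad_first`. A single-process sketch of that contract (an illustration, not the real implementation):

import torch

def pad_to_length(tensor, target_len, pad_first=False):
    # zero-pad along dim 0 until the tensor reaches target_len
    pad = torch.zeros(target_len - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

shards = [torch.ones(2, 10), torch.ones(3, 10)]     # e.g. what two ranks might hold
target = max(s.shape[0] for s in shards)
assert all(pad_to_length(s, target).shape == (3, 10) for s in shards)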
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def get_masked_lm_array(_lowerCamelCase ):
_lowerCAmelCase : str = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCAmelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
if "kernel" in name:
_lowerCAmelCase : Any = array.transpose()
return torch.from_numpy(lowerCamelCase__ )
def get_encoder_array(_lowerCamelCase ):
_lowerCAmelCase : Any = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCAmelCase : Tuple = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
if "kernel" in name:
_lowerCAmelCase : str = array.transpose()
return torch.from_numpy(lowerCamelCase__ )
def get_encoder_layer_array(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[Any] = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCAmelCase : Union[str, Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
if "kernel" in name:
_lowerCAmelCase : str = array.transpose()
return torch.from_numpy(lowerCamelCase__ )
def get_encoder_attention_layer_array(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : str = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCAmelCase : Optional[Any] = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
_lowerCAmelCase : Tuple = array.reshape(lowerCamelCase__ )
if "kernel" in name:
_lowerCAmelCase : int = array.transpose()
return torch.from_numpy(lowerCamelCase__ )
print(f"""Loading model based on config from {config_path}...""" )
_lowerCAmelCase : Union[str, Any] = BertConfig.from_json_file(lowerCamelCase__ )
_lowerCAmelCase : Optional[Any] = BertForMaskedLM(lowerCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowerCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
_lowerCAmelCase : BertSelfAttention = layer.attention.self
_lowerCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
lowerCamelCase__ , '_query_dense/kernel' , self_attn.query.weight.data.shape )
_lowerCAmelCase : Dict = get_encoder_attention_layer_array(
lowerCamelCase__ , '_query_dense/bias' , self_attn.query.bias.data.shape )
_lowerCAmelCase : Tuple = get_encoder_attention_layer_array(
lowerCamelCase__ , '_key_dense/kernel' , self_attn.key.weight.data.shape )
_lowerCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
lowerCamelCase__ , '_key_dense/bias' , self_attn.key.bias.data.shape )
_lowerCAmelCase : List[str] = get_encoder_attention_layer_array(
lowerCamelCase__ , '_value_dense/kernel' , self_attn.value.weight.data.shape )
_lowerCAmelCase : Union[str, Any] = get_encoder_attention_layer_array(
lowerCamelCase__ , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
_lowerCAmelCase : BertSelfOutput = layer.attention.output
_lowerCAmelCase : Any = get_encoder_attention_layer_array(
lowerCamelCase__ , '_output_dense/kernel' , self_output.dense.weight.data.shape )
_lowerCAmelCase : str = get_encoder_attention_layer_array(
lowerCamelCase__ , '_output_dense/bias' , self_output.dense.bias.data.shape )
_lowerCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , '_attention_layer_norm/gamma' )
_lowerCAmelCase : Tuple = get_encoder_layer_array(lowerCamelCase__ , '_attention_layer_norm/beta' )
# Intermediate
_lowerCAmelCase : BertIntermediate = layer.intermediate
_lowerCAmelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase__ , '_intermediate_dense/kernel' )
_lowerCAmelCase : Any = get_encoder_layer_array(lowerCamelCase__ , '_intermediate_dense/bias' )
# Output
_lowerCAmelCase : BertOutput = layer.output
_lowerCAmelCase : Union[str, Any] = get_encoder_layer_array(lowerCamelCase__ , '_output_dense/kernel' )
_lowerCAmelCase : Optional[int] = get_encoder_layer_array(lowerCamelCase__ , '_output_dense/bias' )
_lowerCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , '_output_layer_norm/gamma' )
_lowerCAmelCase : List[str] = get_encoder_layer_array(lowerCamelCase__ , '_output_layer_norm/beta' )
# Embeddings
_lowerCAmelCase : int = get_encoder_array('_position_embedding_layer/embeddings' )
_lowerCAmelCase : Optional[Any] = get_encoder_array('_type_embedding_layer/embeddings' )
_lowerCAmelCase : Any = get_encoder_array('_embedding_norm_layer/gamma' )
_lowerCAmelCase : List[str] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
_lowerCAmelCase : List[Any] = model.cls.predictions.transform
_lowerCAmelCase : List[Any] = get_masked_lm_array('dense/kernel' )
_lowerCAmelCase : Optional[Any] = get_masked_lm_array('dense/bias' )
_lowerCAmelCase : Optional[int] = get_masked_lm_array('layer_norm/gamma' )
_lowerCAmelCase : int = get_masked_lm_array('layer_norm/beta' )
_lowerCAmelCase : List[str] = get_masked_lm_array('embedding_table' )
# Pooling
_lowerCAmelCase : Union[str, Any] = BertPooler(config=lowerCamelCase__ )
    _lowerCAmelCase : torch.Tensor = get_encoder_array('_pooler_layer/kernel' )
    _lowerCAmelCase : torch.Tensor = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(lowerCamelCase__ )
# Integration test - should load without any errors ;)
_lowerCAmelCase : Optional[int] = BertForMaskedLM.from_pretrained(lowerCamelCase__ )
print(new_model.eval() )
    print('Model conversion was done successfully!' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCAmelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 710 |
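The `array.transpose()` applied to every `kernel` above reflects a layout convention, not a quirk of this checkpoint: TF `Dense` kernels are stored as `(in_features, out_features)` while PyTorch `nn.Linear` weights are `(out_features, in_features)`. A quick sketch with arbitrary shapes:

import numpy as np
import torch

tf_kernel = np.random.rand(768, 3072).astype(np.float32)  # TF layout: (in, out)
linear = torch.nn.Linear(768, 3072)                        # PyTorch layout: (out, in)
linear.weight.data = torch.from_numpy(tf_kernel.transpose())
assert tuple(linear.weight.shape) == (3072, 768)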
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 0 |
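As a sanity check, the backtracking enumeration above yields exactly the n! orderings that `itertools.permutations` produces (possibly in a different order):

from itertools import permutations

expected = set(permutations([3, 1, 2, 4]))
assert len(expected) == 24  # 4! distinct orderings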
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self ,**_A ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCAmelCase : Tuple = deprecated_arg[3:]
_lowerCAmelCase : Tuple = not kwargs.pop(__A )
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
_lowerCAmelCase : Dict = kwargs.pop('tpu_name' ,self.tpu_name )
_lowerCAmelCase : List[Any] = kwargs.pop('device_idx' ,self.device_idx )
_lowerCAmelCase : Optional[int] = kwargs.pop('eager_mode' ,self.eager_mode )
_lowerCAmelCase : Optional[Any] = kwargs.pop('use_xla' ,self.use_xla )
super().__init__(**__A )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Name of TPU"} , )
_UpperCAmelCase = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
_UpperCAmelCase = field(default=a__ , metadata={"help": "Benchmark models in eager model."} )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
_lowerCAmelCase : Any = None
if self.tpu:
try:
if self.tpu_name:
_lowerCAmelCase : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCAmelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCAmelCase : Tuple = None
return tpu
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCAmelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] ,'GPU' )
_lowerCAmelCase : Tuple = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] ,'GPU' ) # disable GPU
_lowerCAmelCase : Optional[int] = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
return self._setup_tpu is not None
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
return self._setup_strategy
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
requires_backends(self ,['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.n_gpu > 0
| 711 |
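The `deprecated_arg[3:]` slice in `__init__` above strips the `no_` prefix so each legacy negative flag can be rewritten as its positive counterpart; a tiny sketch with hypothetical flags:

kwargs = {"no_speed": True}                  # legacy negative flag (made up)
for deprecated_arg in ["no_speed", "no_memory"]:
    if deprecated_arg in kwargs:
        positive_arg = deprecated_arg[3:]    # "no_speed" -> "speed"
        kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
print(kwargs)                                # {'speed': False}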
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
| 16 | 0 |
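A minimal usage sketch of the adapter above through Accelerate's public `get_logger` helper; note the state must be initialized first, which is exactly what the `RuntimeError` branch enforces:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()                      # initializes PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, by the main process only")
logger.info("logged by every rank, in rank order", main_process_only=False, in_order=True)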
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = 1 # (0 is vertical, 1 is horizontal)
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : int = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
print('Processing...' )
_lowerCAmelCase : Any = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for index, image in enumerate(__lowerCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase : Tuple = random_chars(32 )
_lowerCAmelCase : int = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase : int = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__lowerCAmelCase )} with {file_name}""" )
_lowerCAmelCase : Optional[int] = []
for anno in new_annos[index]:
_lowerCAmelCase : List[Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__lowerCAmelCase )
        with open(f"""{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : int = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , '*.txt' ) ):
_lowerCAmelCase : Any = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
_lowerCAmelCase : Tuple = in_file.readlines()
_lowerCAmelCase : str = os.path.join(__lowerCAmelCase , f"""{label_name}.jpg""" )
_lowerCAmelCase : Union[str, Any] = []
for obj_list in obj_lists:
_lowerCAmelCase : Any = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = []
for idx in range(len(__lowerCAmelCase ) ):
_lowerCAmelCase : str = []
_lowerCAmelCase : int = img_list[idx]
path_list.append(__lowerCAmelCase )
_lowerCAmelCase : Any = anno_list[idx]
_lowerCAmelCase : Optional[int] = cva.imread(__lowerCAmelCase )
if flip_type == 1:
_lowerCAmelCase : Optional[int] = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase : Union[str, Any] = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCAmelCase )
new_imgs_list.append(__lowerCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def lowerCamelCase__ ( _lowerCamelCase = 32 ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase : List[str] = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 712 |
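The coordinate updates in `update_image_and_anno` follow from YOLO's normalized `(class, x_center, y_center, width, height)` format: a horizontal flip mirrors only `x_center`, a vertical flip only `y_center`. A minimal sketch:

def flip_yolo_box(box, flip_type=1):
    # box = (class_id, x_center, y_center, width, height), all normalized to [0, 1]
    cls, x, y, w, h = box
    return (cls, 1 - x, y, w, h) if flip_type == 1 else (cls, x, 1 - y, w, h)

print(flip_yolo_box((0, 0.25, 0.40, 0.20, 0.30)))  # horizontal -> (0, 0.75, 0.4, 0.2, 0.3)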
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
            raise ValueError(
                F"""There should be as many titles as texts, but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        _lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 0 |
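These classes back `DPRReaderTokenizer`; the documented end-to-end flow pairs it with `DPRReader` and feeds the model outputs back into `decode_best_spans`:

from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)  # highest-scoring answer span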
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def wrapper(*_lowerCamelCase , **_lowerCamelCase ):
_lowerCAmelCase : List[Any] = timeit.default_timer()
_lowerCAmelCase : Tuple = func(*__UpperCAmelCase , **__UpperCAmelCase )
_lowerCAmelCase : Dict = timeit.default_timer() - starttime
return delta
_lowerCAmelCase : List[str] = func.__name__
return wrapper
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Any = seq_shapes or {}
for i in range(__UpperCAmelCase ):
_lowerCAmelCase : str = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
_lowerCAmelCase : Dict = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
_lowerCAmelCase : Optional[int] = 'The small grey turtle was surprisingly fast when challenged.'
else:
_lowerCAmelCase : Tuple = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
_lowerCAmelCase : int = v.feature
_lowerCAmelCase : Union[str, Any] = seq_shapes[k]
_lowerCAmelCase : List[str] = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
_lowerCAmelCase : int = data
dummy_data.append((i, example) )
return dummy_data
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
_lowerCAmelCase : str = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
_lowerCAmelCase, _lowerCAmelCase : int = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
_lowerCAmelCase : Dict = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
| 713 |
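De-obfuscated, the timing helper at the top of this file is the classic decorator pattern; a standalone sketch:

import timeit

def get_duration(func):
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start  # seconds elapsed
    wrapper.__name__ = func.__name__
    return wrapper

@get_duration
def busy_work(n):
    sum(range(n))

print(f"busy_work took {busy_work(1_000_000):.4f}s")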
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 0 |
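A usage sketch for the pipeline exercised by the slow tests above, adapted from the Diffusers docs (assumes a CUDA device and `scipy` for writing the waveform):

from scipy.io.wavfile import write
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda")
audios = pipe(audio_length_in_s=4.096, num_inference_steps=100).audios
# audios has shape (batch, channels, samples); scipy expects (samples, channels)
write("maestro_sample.wav", pipe.unet.config.sample_rate, audios[0].T)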
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(lowerCAmelCase__ ) as metadata_file:
_lowerCAmelCase : List[Any] = json.load(lowerCAmelCase__ )
_lowerCAmelCase : List[str] = LukeConfig(use_entity_aware_attention=lowerCAmelCase__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase : List[str] = torch.load(lowerCAmelCase__ , map_location='cpu' )['module']
# Load the entity vocab file
_lowerCAmelCase : Tuple = load_original_entity_vocab(lowerCAmelCase__ )
# add an entry for [MASK2]
_lowerCAmelCase : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowerCAmelCase : int = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase : Optional[Any] = AddedToken('<ent>' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
_lowerCAmelCase : List[str] = AddedToken('<ent2>' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , 'r' ) as f:
_lowerCAmelCase : Union[str, Any] = json.load(lowerCAmelCase__ )
_lowerCAmelCase : Optional[int] = 'MLukeTokenizer'
with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
_lowerCAmelCase : Any = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
# Initialize the embeddings of the special tokens
_lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(['@'] )[0]
_lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(['#'] )[0]
_lowerCAmelCase : int = state_dict['embeddings.word_embeddings.weight']
_lowerCAmelCase : Any = word_emb[ent_init_index].unsqueeze(0 )
_lowerCAmelCase : Union[str, Any] = word_emb[enta_init_index].unsqueeze(0 )
_lowerCAmelCase : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowerCAmelCase : str = state_dict[bias_name]
_lowerCAmelCase : Dict = decoder_bias[ent_init_index].unsqueeze(0 )
_lowerCAmelCase : Any = decoder_bias[enta_init_index].unsqueeze(0 )
_lowerCAmelCase : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase : Dict = f"""encoder.layer.{layer_index}.attention.self."""
_lowerCAmelCase : Tuple = state_dict[prefix + matrix_name]
_lowerCAmelCase : Tuple = state_dict[prefix + matrix_name]
_lowerCAmelCase : List[str] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase : str = state_dict['entity_embeddings.entity_embeddings.weight']
_lowerCAmelCase : Tuple = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
_lowerCAmelCase : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowerCAmelCase : List[Any] = state_dict['entity_predictions.bias']
_lowerCAmelCase : int = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
_lowerCAmelCase : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowerCAmelCase : Dict = LukeForMaskedLM(config=lowerCAmelCase__ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
_lowerCAmelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
_lowerCAmelCase : List[str] = state_dict[key]
else:
_lowerCAmelCase : str = state_dict[key]
_lowerCAmelCase, _lowerCAmelCase : List[Any] = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if set(lowerCAmelCase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowerCAmelCase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowerCAmelCase : int = MLukeTokenizer.from_pretrained(lowerCAmelCase__ , task='entity_classification' )
_lowerCAmelCase : int = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
_lowerCAmelCase : List[str] = (0, 9)
_lowerCAmelCase : int = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='pt' )
_lowerCAmelCase : List[Any] = model(**lowerCAmelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase : List[str] = torch.Size((1, 33, 768) )
_lowerCAmelCase : List[Any] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase : str = torch.Size((1, 1, 768) )
_lowerCAmelCase : int = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowerCAmelCase : Optional[Any] = MLukeTokenizer.from_pretrained(lowerCAmelCase__ )
_lowerCAmelCase : List[str] = 'Tokyo is the capital of <mask>.'
_lowerCAmelCase : Dict = (24, 30)
_lowerCAmelCase : Dict = tokenizer(lowerCAmelCase__ , entity_spans=[span] , return_tensors='pt' )
_lowerCAmelCase : Dict = model(**lowerCAmelCase__ )
_lowerCAmelCase : Dict = encoding['input_ids'][0].tolist()
_lowerCAmelCase : str = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
_lowerCAmelCase : int = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowerCAmelCase__ )
_lowerCAmelCase : List[str] = outputs.entity_logits[0][0].argmax().item()
_lowerCAmelCase : Dict = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(lowerCAmelCase__ ) )
model.save_pretrained(lowerCAmelCase__ )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = ['[MASK]', '[PAD]', '[UNK]']
_lowerCAmelCase : str = [json.loads(lowerCAmelCase__ ) for line in open(lowerCAmelCase__ )]
_lowerCAmelCase : Optional[int] = {}
for entry in data:
_lowerCAmelCase : Dict = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowerCAmelCase : Dict = entity_id
break
_lowerCAmelCase : Dict = f"""{language}:{entity_name}"""
_lowerCAmelCase : Union[str, Any] = entity_id
return new_mapping
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowerCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 714 |
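The masked-word check near the end of the script corresponds to this public usage pattern; the released `studio-ousia/mluke-base` model id is assumed here, while the script itself verifies a local dump:

from transformers import LukeForMaskedLM, MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
model = LukeForMaskedLM.from_pretrained("studio-ousia/mluke-base")
text = "Tokyo is the capital of <mask>."
inputs = tokenizer(text, entity_spans=[(24, 30)], return_tensors="pt")
outputs = model(**inputs)
mask_position = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
print(tokenizer.decode(outputs.logits[0, mask_position].argmax(-1)))  # expected: "Japan"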
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
        if scheduler is None:
            _lowerCAmelCase : int = self.scheduler_classes[0]
            _lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
            _lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
                    assert not torch.isnan(_A ).any(), "Samples contain NaN values"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
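# Stand-alone sketch of the config save/restore round trip exercised by the tests
# above. It uses only names already referenced in this file (UniPCMultistepScheduler,
# save_config, from_pretrained); the helper name itself is illustrative.
def _demo_scheduler_config_roundtrip():
    scheduler = UniPCMultistepScheduler()
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        restored = UniPCMultistepScheduler.from_pretrained(tmpdirname)
    # stepping `scheduler` and `restored` with identical inputs should agree to ~1e-5,
    # which is exactly what the round-trip tests above assert
    return restored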
| 16 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
_lowerCAmelCase = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class __UpperCamelCase ( __lowerCAmelCase ):
_UpperCAmelCase = "tapas"
def __init__( self ,_A=3_0522 ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=1024 ,_A=[3, 256, 256, 2, 256, 256, 10] ,_A=0.0_2 ,_A=1E-12 ,_A=0 ,_A=10.0 ,_A=0 ,_A=1.0 ,_A=None ,_A=1.0 ,_A=False ,_A=None ,_A=1.0 ,_A=1.0 ,_A=False ,_A=False ,_A="ratio" ,_A=None ,_A=None ,_A=64 ,_A=32 ,_A=False ,_A=True ,_A=False ,_A=False ,_A=True ,_A=False ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_sizes
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
# Fine-tuning task hyperparameters
_lowerCAmelCase : List[Any] = positive_label_weight
_lowerCAmelCase : int = num_aggregation_labels
_lowerCAmelCase : Any = aggregation_loss_weight
_lowerCAmelCase : str = use_answer_as_supervision
_lowerCAmelCase : int = answer_loss_importance
_lowerCAmelCase : List[Any] = use_normalized_answer_loss
_lowerCAmelCase : int = huber_loss_delta
_lowerCAmelCase : Dict = temperature
_lowerCAmelCase : List[str] = aggregation_temperature
_lowerCAmelCase : Tuple = use_gumbel_for_cells
_lowerCAmelCase : Optional[int] = use_gumbel_for_aggregation
_lowerCAmelCase : Optional[int] = average_approximation_function
_lowerCAmelCase : Optional[Any] = cell_selection_preference
_lowerCAmelCase : Optional[Any] = answer_loss_cutoff
_lowerCAmelCase : List[str] = max_num_rows
_lowerCAmelCase : Optional[int] = max_num_columns
_lowerCAmelCase : List[Any] = average_logits_per_cell
_lowerCAmelCase : Dict = select_one_column
_lowerCAmelCase : Optional[int] = allow_empty_column_selection
_lowerCAmelCase : List[Any] = init_cell_selection_weights_to_zero
_lowerCAmelCase : List[Any] = reset_position_index_per_cell
_lowerCAmelCase : Tuple = disable_per_token_loss
# Aggregation hyperparameters
_lowerCAmelCase : Optional[int] = aggregation_labels
_lowerCAmelCase : Any = no_aggregation_label_index
if isinstance(self.aggregation_labels ,_UpperCamelCase ):
_lowerCAmelCase : int = {int(_UpperCamelCase ): v for k, v in aggregation_labels.items()}
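# Minimal usage sketch (the helper name is illustrative): the defaults above give a
# BERT-base sized encoder plus the TAPAS fine-tuning hyperparameters.
def _demo_tapas_config():
    config = __UpperCamelCase()
    print(config.hidden_size, config.num_hidden_layers)  # 768 and 12 per the defaults
    return config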
| 715 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
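# Quick sanity sketch for the renaming above. The keys are illustrative, not taken
# from a real checkpoint, and the helper name is made up:
def _demo_replace_key():
    for sample_key in ("prime_state_proj.weight", "prior.x_out.bias", "y_emb.weight"):
        print(sample_key, "->", replace_key(sample_key))
    # expected: encoder.proj_in.weight, prior.fc_proj_out.bias, metadata_embedding.weight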
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key} -> {key} : \nshapes {val.shape} and {value.shape} do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
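# Example invocation (the script filename is illustrative):
#   python convert_jukebox_checkpoint.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted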
| 16 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __UpperCamelCase :
_UpperCAmelCase = 42
# setable values
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = None
@classmethod
def __lowerCamelCase ( cls ,_A ,_A ,_A ):
'''simple docstring'''
return cls(common=__lowerCamelCase ,init_noise_sigma=__lowerCamelCase ,timesteps=__lowerCamelCase )
@dataclass
class __UpperCamelCase ( _A ):
_UpperCAmelCase = 42
class __UpperCamelCase ( _A , _A ):
_UpperCAmelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_UpperCAmelCase = 42
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return True
@register_to_config
def __init__( self ,_A = 1000 ,_A = 0.0_0_0_1 ,_A = 0.0_2 ,_A = "linear" ,_A = None ,_A = "fixed_small" ,_A = True ,_A = "epsilon" ,_A = jnp.floataa ,):
'''simple docstring'''
_lowerCAmelCase : str = dtype
def __lowerCamelCase ( self ,_A = None ):
'''simple docstring'''
if common is None:
_lowerCAmelCase : int = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_lowerCAmelCase : Tuple = jnp.array(1.0 ,dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__lowerCamelCase ,init_noise_sigma=__lowerCamelCase ,timesteps=__lowerCamelCase ,)
def __lowerCamelCase ( self ,_A ,_A ,_A = None ):
'''simple docstring'''
return sample
def __lowerCamelCase ( self ,_A ,_A ,_A = () ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
_lowerCAmelCase : int = (jnp.arange(0 ,__lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__lowerCamelCase ,timesteps=__lowerCamelCase ,)
def __lowerCamelCase ( self ,_A ,_A ,_A=None ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : str = state.common.alphas_cumprod[t]
_lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCAmelCase : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_lowerCAmelCase : Tuple = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
_lowerCAmelCase : str = jnp.clip(__lowerCamelCase ,a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
_lowerCAmelCase : str = jnp.log(jnp.clip(__lowerCamelCase ,a_min=1E-20 ) )
elif variance_type == "fixed_large":
_lowerCAmelCase : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_lowerCAmelCase : str = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_lowerCAmelCase : Optional[Any] = variance
_lowerCAmelCase : str = state.common.betas[t]
_lowerCAmelCase : Optional[int] = (predicted_variance + 1) / 2
_lowerCAmelCase : int = frac * max_log + (1 - frac) * min_log
return variance
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = timestep
if key is None:
_lowerCAmelCase : Any = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
_lowerCAmelCase : Any = jnp.split(__lowerCamelCase ,sample.shape[1] ,axis=1 )
else:
_lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
_lowerCAmelCase : Dict = state.common.alphas_cumprod[t]
_lowerCAmelCase : Tuple = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
_lowerCAmelCase : Any = 1 - alpha_prod_t
_lowerCAmelCase : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCAmelCase : Union[str, Any] = model_output
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' or `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase : Dict = jnp.clip(__lowerCamelCase ,-1 ,1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_lowerCAmelCase : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCAmelCase : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
_lowerCAmelCase : str = jax.random.split(__lowerCamelCase ,num=1 )
_lowerCAmelCase : Any = jax.random.normal(__lowerCamelCase ,shape=model_output.shape ,dtype=self.dtype )
return (self._get_variance(__lowerCamelCase ,__lowerCamelCase ,predicted_variance=__lowerCamelCase ) ** 0.5) * noise
_lowerCAmelCase : List[Any] = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )
_lowerCAmelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__lowerCamelCase ,state=__lowerCamelCase )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
return add_noise_common(state.common ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
return get_velocity_common(state.common ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
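# Minimal usage sketch for the scheduler above. It assumes the public diffusers name
# FlaxDDPMScheduler for the class; the zero "model output" is a stand-in for a real
# denoiser and the helper name is illustrative.
def _demo_flax_ddpm_loop():
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    sample = jnp.zeros((1, 3, 8, 8), dtype=jnp.float32)
    key = jax.random.PRNGKey(0)
    for t in state.timesteps:
        key, step_key = jax.random.split(key)
        model_output = jnp.zeros_like(sample)  # stand-in for a UNet prediction
        out = scheduler.step(state, model_output, t, sample, key=step_key)
        sample, state = out.prev_sample, out.state
    return sample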
| 716 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ):
'''simple docstring'''
_lowerCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Any = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
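# e.g. height = width = 768 with scale_factor = 8: 768 // 8**2 = 12 with no remainder,
# so the function returns (12 * 8, 12 * 8) = (96, 96) -- the latent resolution used below.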
class __UpperCamelCase ( _UpperCAmelCase ):
def __init__( self ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCamelCase_ ,scheduler=lowerCamelCase_ ,movq=lowerCamelCase_ ,)
_lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
if latents is None:
_lowerCAmelCase : List[Any] = randn_tensor(lowerCamelCase_ ,generator=lowerCamelCase_ ,device=lowerCamelCase_ ,dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_lowerCAmelCase : int = latents.to(lowerCamelCase_ )
_lowerCAmelCase : List[str] = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowerCAmelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ ,lowerCamelCase_ )
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_lowerCAmelCase : Optional[int] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase : List[Any] = cpu_offload_with_hook(lowerCamelCase_ ,lowerCamelCase_ ,prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
_lowerCAmelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase_ )
def __call__( self ,_A ,_A ,_A ,_A = 512 ,_A = 512 ,_A = 100 ,_A = 4.0 ,_A = 1 ,_A = None ,_A = None ,_A = "pil" ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._execution_device
_lowerCAmelCase : List[Any] = guidance_scale > 1.0
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
_lowerCAmelCase : str = torch.cat(lowerCamelCase_ ,dim=0 )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
_lowerCAmelCase : Optional[Any] = torch.cat(lowerCamelCase_ ,dim=0 )
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
_lowerCAmelCase : Union[str, Any] = torch.cat(lowerCamelCase_ ,dim=0 )
_lowerCAmelCase : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_lowerCAmelCase : Tuple = image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
_lowerCAmelCase : Optional[Any] = negative_image_embeds.repeat_interleave(lowerCamelCase_ ,dim=0 )
_lowerCAmelCase : Tuple = hint.repeat_interleave(lowerCamelCase_ ,dim=0 )
_lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase_ )
_lowerCAmelCase : List[Any] = torch.cat([hint, hint] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ ,device=lowerCamelCase_ )
_lowerCAmelCase : Dict = self.scheduler.timesteps
_lowerCAmelCase : List[str] = self.movq.config.latent_channels
_lowerCAmelCase : Any = downscale_height_and_width(lowerCamelCase_ ,lowerCamelCase_ ,self.movq_scale_factor )
# create initial latent
_lowerCAmelCase : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Any = {'''image_embeds''': image_embeds, '''hint''': hint}
_lowerCAmelCase : Dict = self.unet(
sample=lowerCamelCase_ ,timestep=lowerCamelCase_ ,encoder_hidden_states=lowerCamelCase_ ,added_cond_kwargs=lowerCamelCase_ ,return_dict=lowerCamelCase_ ,)[0]
if do_classifier_free_guidance:
_lowerCAmelCase : Any = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCAmelCase : str = noise_pred.chunk(2 )
_lowerCAmelCase : List[Any] = variance_pred.chunk(2 )
_lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : str = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase : int = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : str = self.scheduler.step(
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ,)[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(lowerCamelCase_ ,force_not_quantize=lowerCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : Optional[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Optional[Any] = image.clamp(0 ,1 )
_lowerCAmelCase : str = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : Optional[int] = self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
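    # Layout built above, using the special tokens from __init__: a single sequence
    # becomes "X <sep> <cls>" and a pair becomes "A <sep> B <sep> <cls>" -- unlike
    # BERT, XLNet appends the special tokens at the end.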
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __UpperCamelCase ( _UpperCAmelCase ):
_UpperCAmelCase = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
            # Convert the given block into a list of sixteen 4-byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
            # extend with 48 zero-initialized words to hold the full message schedule
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
                    # fill in the zero-initialized message-schedule words beyond index 15
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
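# Minimal usage sketch mirroring the unittest above (the helper name is illustrative):
def _demo_sha_digest():
    digest = SHAaaa(bytes("hello", "utf-8")).hash
    assert len(digest) == 64  # eight 32-bit words rendered as 8 hex characters each
    return digest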
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = u
for i in range(1 , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = temp * (u - i)
return temp
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = int(input('enter the numbers of values: ' ) )
_lowerCAmelCase : Optional[Any] = []
for _ in range(_lowerCamelCase ):
y.append([] )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
y[i].append(_lowerCamelCase )
_lowerCAmelCase : str = 0
print('enter the values of parameters in a list: ' )
_lowerCAmelCase : str = list(map(_lowerCamelCase , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = float(input() )
_lowerCAmelCase : Union[str, Any] = int(input('enter the value to interpolate: ' ) )
_lowerCAmelCase : int = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , _lowerCamelCase ):
for j in range(n - i ):
_lowerCAmelCase : Optional[int] = y[j + 1][i - 1] - y[j][i - 1]
_lowerCAmelCase : Union[str, Any] = y[0][0]
for i in range(1 , _lowerCamelCase ):
summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
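# Worked example (hand-checked): for x = [0, 1, 2] and y = [1, 3, 7] the forward
# differences are Δy0 = 2 and Δ²y0 = 2. Interpolating at value = 1.5 gives
# u = (1.5 - 0) / (1 - 0) = 1.5, so
#     P(1.5) = 1 + u*Δy0 + u(u - 1)/2! * Δ²y0 = 1 + 3 + 0.75 = 4.75,
# which matches the quadratic x^2 + x + 1 passing through the three points.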
| 719 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
        # Stores the function used to score an item; heap ordering is based on this score.
        _lowerCAmelCase : Union[str, Any] = key or (lambda _A : _A)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
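# The class above implements a keyed binary min-heap with a position map, so items
# can be updated or deleted in O(log n). A standalone sketch of the same keyed
# ordering idea (using the stdlib, since the dumped class's method names were all
# obfuscated to the same identifier and cannot be called as written):
import heapq

_key = lambda x: -x                        # negate the key to get max-heap behaviour
_heap = [(_key(v), v) for v in (3, 1, 2)]
heapq.heapify(_heap)
assert heapq.heappop(_heap)[1] == 3        # the largest value is popped first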
| 16 | 0 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=100 ,_A=13 ,_A=30 ,_A=2 ,_A=3 ,_A=True ,_A=True ,_A=32 ,_A=4 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,_A=3 ,_A=None ,_A=[0, 1, 2, 3] ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : str = 100
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Optional[int] = scope
_lowerCAmelCase : Dict = out_indices
_lowerCAmelCase : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : int = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_lowercase ,initializer_range=self.initializer_range ,out_indices=self.out_indices ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = BeitModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = BeitForMaskedImageModeling(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.type_sequence_label_size
_lowerCAmelCase : Dict = BeitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : int = model(_lowercase ,labels=_lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Tuple = BeitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(_lowercase ,labels=_lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Any = BeitForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Optional[int] = model(_lowercase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_lowerCAmelCase : List[Any] = model(_lowercase ,labels=_lowercase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase : int = config_and_inputs
_lowerCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BeitModelTester(self )
_lowerCAmelCase : str = ConfigTester(self ,config_class=_lowercase ,has_text_modality=_lowercase ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase ,nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(_lowercase )
_lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_lowercase ), BeitForMaskedImageModeling]:
continue
_lowerCAmelCase : Optional[int] = model_class(_lowercase )
model.to(_lowercase )
model.train()
_lowerCAmelCase : Tuple = self._prepare_for_class(_lowercase ,_lowercase ,return_labels=_lowercase )
_lowerCAmelCase : str = model(**_lowercase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowerCAmelCase : Dict = False
_lowerCAmelCase : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_lowercase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCAmelCase : int = model_class(_lowercase )
model.gradient_checkpointing_enable()
model.to(_lowercase )
model.train()
_lowerCAmelCase : List[Any] = self._prepare_for_class(_lowercase ,_lowercase ,return_labels=_lowercase )
_lowerCAmelCase : List[Any] = model(**_lowercase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(config=_lowercase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = BeitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_lowercase )
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Optional[int] = image_processor(images=_lowercase ,return_tensors='pt' ).pixel_values.to(_lowercase )
# prepare bool_masked_pos
_lowerCAmelCase : Optional[Any] = torch.ones((1, 196) ,dtype=torch.bool ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(pixel_values=_lowercase ,bool_masked_pos=_lowercase )
_lowerCAmelCase : str = outputs.logits
# verify the logits
_lowerCAmelCase : str = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape ,_lowercase )
_lowerCAmelCase : int = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] ,_lowercase ,atol=1E-2 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_lowercase )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=_lowercase ,return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**_lowercase )
_lowerCAmelCase : Dict = outputs.logits
# verify the logits
_lowerCAmelCase : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(logits.shape ,_lowercase )
_lowerCAmelCase : Any = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[0, :3] ,_lowercase ,atol=1E-4 ) )
_lowerCAmelCase : int = 281
self.assertEqual(logits.argmax(-1 ).item() ,_lowercase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
_lowercase )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Union[str, Any] = image_processor(images=_lowercase ,return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Any = model(**_lowercase )
_lowerCAmelCase : str = outputs.logits
# verify the logits
_lowerCAmelCase : List[str] = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape ,_lowercase )
_lowerCAmelCase : Union[str, Any] = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(_lowercase )
self.assertTrue(torch.allclose(logits[0, :3] ,_lowercase ,atol=1E-4 ) )
_lowerCAmelCase : Dict = 2396
self.assertEqual(logits.argmax(-1 ).item() ,_lowercase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCAmelCase : int = model.to(_lowercase )
_lowerCAmelCase : Any = BeitImageProcessor(do_resize=_lowercase ,size=640 ,do_center_crop=_lowercase )
_lowerCAmelCase : List[str] = load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
_lowerCAmelCase : Tuple = Image.open(ds[0]['file'] )
_lowerCAmelCase : Any = image_processor(images=_lowercase ,return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**_lowercase )
_lowerCAmelCase : str = outputs.logits
# verify the logits
_lowerCAmelCase : Optional[Any] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape ,_lowercase )
_lowerCAmelCase : int = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
        if is_pillow_less_than_9:
_lowerCAmelCase : Dict = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] ,device=_lowercase ,)
else:
_lowerCAmelCase : List[str] = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] ,device=_lowercase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_lowercase ,atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
_lowerCAmelCase : Optional[Any] = model.to(_lowercase )
_lowerCAmelCase : List[Any] = BeitImageProcessor(do_resize=_lowercase ,size=640 ,do_center_crop=_lowercase )
_lowerCAmelCase : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' ,split='test' )
_lowerCAmelCase : Any = Image.open(ds[0]['file'] )
_lowerCAmelCase : Optional[int] = image_processor(images=_lowercase ,return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**_lowercase )
_lowerCAmelCase : str = outputs.logits.detach().cpu()
_lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowercase ,target_sizes=[(500, 300)] )
_lowerCAmelCase : int = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,_lowercase )
_lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowercase )
_lowerCAmelCase : Optional[Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape ,_lowercase )
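# Minimal classification sketch mirroring the slow test above (standard
# transformers inference API; downloading the checkpoint requires network access):
def _beit_inference_sketch():
    import torch
    from PIL import Image
    from transformers import BeitForImageClassification, BeitImageProcessor

    processor = BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
    model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224')
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1).item()  # the slow test above expects class id 281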
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( BaseOutput ):
_UpperCAmelCase = 42
class __UpperCamelCase ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
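# How the additive causal mask registered in __init__ works: positions strictly
# above the diagonal carry -10000.0, which drives their post-softmax attention
# weight to ~0. Minimal standalone sketch (torch is imported at the top of this file):
def _causal_mask_sketch(n=4):
    mask = torch.full([n, n], -1_0_0_0_0.0)
    mask.triu_(1)                    # 0 on/below the diagonal, -10000 strictly above
    probs = mask.softmax(dim=-1)     # row i is ~uniform over positions 0..i, ~0 beyond
    return probs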
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if num <= 0:
_lowerCAmelCase : Dict = f"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(snake_case__ )
_lowerCAmelCase : Optional[int] = [True] * (num + 1)
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : int = int(math.sqrt(snake_case__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(snake_case__ )
            # Mark multiples of start as False
for i in range(start * start , num + 1 , snake_case__ ):
if sieve[i] is True:
_lowerCAmelCase : int = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(snake_case__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
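# Worked example of the sieve above (using the intended public name `prime_sieve`,
# which the __main__ guard also assumes):
def _sieve_example():
    assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]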
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                _lowerCAmelCase : List[str] = {'dtype': jnp.int64}
            else:
                _lowerCAmelCase : Tuple = {'dtype': jnp.int32}
        elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
            _lowerCAmelCase : Any = {'dtype': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
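# Standalone sketch of the dtype rule _tensorize applies above: integer numpy input
# maps to int64 only when 64-bit mode is enabled in jax, otherwise int32 (floats
# always map to float32):
def _default_int_dtype():
    import jax
    import jax.numpy as jnp

    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32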
| 16 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __UpperCamelCase ( unittest.TestCase ):
_UpperCAmelCase = MODEL_FOR_MASKED_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_MASKED_LM_MAPPING
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,top_k=2 ,framework='tf' )
_lowerCAmelCase : Any = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_8015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_5506, 'token_str': ' accuser'},
] ,)
_lowerCAmelCase : List[Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_8015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_5506,
'token_str': ' accuser',
},
] ,)
_lowerCAmelCase : Optional[int] = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'},
] ,)
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,top_k=2 ,framework='pt' )
_lowerCAmelCase : int = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_5676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS'},
] ,)
_lowerCAmelCase : Union[str, Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS'},
] ,)
_lowerCAmelCase : Tuple = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3606, 'token_str': ' Clara'},
] ,)
_lowerCAmelCase : int = unmasker('My name is <mask> <mask>' ,top_k=2 )
self.assertEqual(
nested_simplify(_A ,decimals=6 ) ,[
[
{
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] ,)
@require_torch_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = pipeline('fill-mask' ,model='hf-internal-testing/tiny-random-distilbert' ,device=0 ,framework='pt' )
# convert model to fp16
pipe.model.half()
_lowerCAmelCase : Tuple = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
# for postprocessing.
self.assertIsInstance(_A ,_A )
@slow
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = pipeline(task='fill-mask' ,model='distilroberta-base' ,top_k=2 ,framework='pt' )
self.run_large_test(_A )
@slow
@require_tf
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pipeline(task='fill-mask' ,model='distilroberta-base' ,top_k=2 ,framework='tf' )
self.run_large_test(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(_A ) ,[
{'sequence': 'My name is John', 'score': 0.0_0_8, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_0_7, 'token': 1573, 'token_str': ' Chris'},
] ,)
_lowerCAmelCase : List[Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(_A ) ,[
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_5_1,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_1_4,
'token': 1_2790,
'token_str': ' Lyon',
},
] ,)
_lowerCAmelCase : Optional[int] = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(_A ) ,[
{'sequence': 'My name is Patrick', 'score': 0.0_0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_0_0, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_0_0, 'token': 2941, 'token_str': ' Te'},
] ,)
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,framework='pt' )
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Optional[int] = None
self.run_pipeline_test(_A ,[] )
@require_tf
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,framework='tf' )
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Dict = None
self.run_pipeline_test(_A ,[] )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
_lowerCAmelCase : List[str] = FillMaskPipeline(model=_A ,tokenizer=_A )
_lowerCAmelCase : List[str] = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = fill_masker.tokenizer
_lowerCAmelCase : Optional[int] = fill_masker.model
_lowerCAmelCase : Union[str, Any] = fill_masker(
F"""This is a {tokenizer.mask_token}""" ,)
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
_lowerCAmelCase : Any = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
_lowerCAmelCase : Tuple = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
_A ,[
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
] ,)
with self.assertRaises(_A ):
fill_masker([None] )
        # Inputs without a mask_token are not supported
with self.assertRaises(_A ):
fill_masker('This is' )
self.run_test_top_k(_A ,_A )
self.run_test_targets(_A ,_A )
self.run_test_top_k_targets(_A ,_A )
self.fill_mask_with_duplicate_targets_and_top_k(_A ,_A )
self.fill_mask_with_multiple_masks(_A ,_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tokenizer.get_vocab()
_lowerCAmelCase : Tuple = sorted(vocab.keys() )[:2]
# Pipeline argument
_lowerCAmelCase : Optional[Any] = FillMaskPipeline(model=_A ,tokenizer=_A ,targets=_A )
_lowerCAmelCase : Tuple = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
_lowerCAmelCase : Union[str, Any] = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} ,_A )
_lowerCAmelCase : List[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} ,set(_A ) )
# Call argument
_lowerCAmelCase : Optional[Any] = FillMaskPipeline(model=_A ,tokenizer=_A )
_lowerCAmelCase : str = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=_A )
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
_lowerCAmelCase : int = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} ,_A )
_lowerCAmelCase : Dict = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} ,set(_A ) )
# Score equivalence
_lowerCAmelCase : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=_A )
_lowerCAmelCase : Any = [top_mask['token_str'] for top_mask in outputs]
_lowerCAmelCase : Optional[int] = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ) == set(_A ):
_lowerCAmelCase : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=_A )
_lowerCAmelCase : Union[str, Any] = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_A ) ,nested_simplify(_A ) )
# Raises with invalid
with self.assertRaises(_A ):
_lowerCAmelCase : Any = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_A ):
_lowerCAmelCase : int = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets=[''] )
with self.assertRaises(_A ):
_lowerCAmelCase : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,targets='' )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = FillMaskPipeline(model=_A ,tokenizer=_A ,top_k=2 )
_lowerCAmelCase : Optional[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
_lowerCAmelCase : Optional[Any] = FillMaskPipeline(model=_A ,tokenizer=_A )
_lowerCAmelCase : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
_A ,[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
] ,)
self.assertEqual(nested_simplify(_A ) ,nested_simplify(_A ) )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tokenizer.get_vocab()
_lowerCAmelCase : List[str] = FillMaskPipeline(model=_A ,tokenizer=_A )
# top_k=2, ntargets=3
_lowerCAmelCase : str = sorted(vocab.keys() )[:3]
_lowerCAmelCase : Dict = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=_A )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        _lowerCAmelCase : Dict = [el['token_str'] for el in sorted(_A ,key=lambda x : x["score"] ,reverse=_A )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ).issubset(_A ):
_lowerCAmelCase : List[Any] = fill_masker(F"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=_A )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_A ) ,nested_simplify(_A ) )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FillMaskPipeline(model=_A ,tokenizer=_A )
_lowerCAmelCase : Tuple = tokenizer.get_vocab()
# String duplicates + id duplicates
_lowerCAmelCase : List[Any] = sorted(vocab.keys() )[:3]
_lowerCAmelCase : List[Any] = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_lowerCAmelCase : List[Any] = fill_masker(F"""My name is {tokenizer.mask_token}""" ,targets=_A ,top_k=10 )
        # The target list contains duplicates, so we can't output more
        # predictions than there are unique targets.
self.assertEqual(len(_A ) ,3 )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = FillMaskPipeline(model=_A ,tokenizer=_A )
_lowerCAmelCase : Union[str, Any] = fill_masker(
F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 )
self.assertEqual(
_A ,[
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
[
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
{'sequence': ANY(_A ), 'score': ANY(_A ), 'token': ANY(_A ), 'token_str': ANY(_A )},
],
] ,)
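# Minimal fill-mask usage matching the tests above (same tiny checkpoint, CPU-friendly):
def _fill_mask_sketch():
    from transformers import pipeline

    unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2)
    # returns a list of dicts with keys: sequence, score, token, token_str
    return unmasker('My name is <mask>')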
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( DiffusionPipeline ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
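# The static method above is spherical linear interpolation (slerp) between two
# flattened latent tensors. Standalone sketch of the same formula with unambiguous
# endpoint names (the dumped body reuses one obfuscated name for both endpoints);
# note the cosine can drift slightly outside [-1, 1] in fp arithmetic, so production
# code should clamp before acos:
def _slerp(x0, x1, alpha):
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)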
| 16 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_lowerCAmelCase = """docs/source/en/_toctree.yml"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = defaultdict(_lowerCamelCase )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Any = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : Union[str, Any] = []
for duplicate_key in duplicates:
_lowerCAmelCase : Optional[Any] = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    _lowerCAmelCase : int = sorted(_lowerCamelCase , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
        raise ValueError(f"""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def lowerCamelCase__ ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding='utf-8' ) as f:
_lowerCAmelCase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase : List[str] = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : str = api_doc[scheduler_idx]['sections']
_lowerCAmelCase : Union[str, Any] = clean_doc_toc(_lowerCamelCase )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : Optional[int] = True
if overwrite:
_lowerCAmelCase : List[Any] = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Optional[int] = api_doc
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
                'The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.' )
def lowerCamelCase__ ( _lowerCamelCase=False ):
'''simple docstring'''
with open(_lowerCamelCase , encoding='utf-8' ) as f:
_lowerCAmelCase : str = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : str = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : Any = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase : Optional[Any] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Tuple = api_doc[pipeline_idx]['sections']
_lowerCAmelCase : Dict = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase : Any = pipeline_doc['section']
_lowerCAmelCase : Dict = clean_doc_toc(_lowerCamelCase )
if overwrite:
_lowerCAmelCase : List[str] = new_sub_pipeline_doc
new_pipeline_docs.append(_lowerCamelCase )
# sort overall pipeline doc
_lowerCAmelCase : Tuple = clean_doc_toc(_lowerCamelCase )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase : Optional[Any] = True
if overwrite:
_lowerCAmelCase : Union[str, Any] = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase : str = api_doc
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
else:
raise ValueError(
            'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowerCAmelCase = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
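# Illustrative sketch (not part of the original file): get_pairs yields the adjacent
# symbol pairs that BPE merging ranks, e.g.
#   get_pairs(("l", "o", "w", "er</w>"))
#   # -> {("l", "o"), ("o", "w"), ("w", "er</w>")}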
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(F"""No merges file provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16 | 0 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.load(snake_case_ , map_location='cpu' )
if "model" in sd.keys():
        _lowerCAmelCase : List[Any] = sd['model']  # reuse the checkpoint already loaded above
# pop unnecessary weights
_lowerCAmelCase : List[str] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
_lowerCAmelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCAmelCase : Optional[int] = sd.pop(snake_case_ )
_lowerCAmelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCAmelCase : int = sd[key]
# We split QKV in separate Q,K,V
_lowerCAmelCase : List[Any] = key.replace('.qkv_proj.' , '.q_proj.' )
_lowerCAmelCase : Any = key.replace('.qkv_proj.' , '.k_proj.' )
_lowerCAmelCase : Optional[int] = key.replace('.qkv_proj.' , '.v_proj.' )
_lowerCAmelCase : int = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCAmelCase : Dict = torch.split(snake_case_ , depth // 3 , dim=0 )
_lowerCAmelCase : Union[str, Any] = q
_lowerCAmelCase : Optional[Any] = k
_lowerCAmelCase : Union[str, Any] = v
del sd[key]
return sd
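# Illustrative sketch (not part of the original script): the qkv split above turns a
# fused weight of shape (3 * hidden, hidden) into three (hidden, hidden) chunks that
# become the separate projection weights, e.g.
#   value = torch.randn(3 * 768, 768)
#   q, k, v = torch.split(value, value.shape[0] // 3, dim=0)  # each (768, 768)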
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : int = load_checkpoint(snake_case_ )
if config is not None:
_lowerCAmelCase : Tuple = OPTConfig.from_pretrained(snake_case_ )
else:
_lowerCAmelCase : Tuple = OPTConfig()
_lowerCAmelCase : Union[str, Any] = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
_lowerCAmelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )  # gated branch: first input projection
        _lowerCAmelCase : Optional[int] = self.wi_b(_A )  # linear branch: second, distinct input projection
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
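# The forward above is the tanh approximation of GELU used by GPT-2 and T5 v1.1:
#   GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))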
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
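# FiLM (feature-wise linear modulation): the conditioning embedding is projected to a
# per-channel (scale, shift) pair and applied as x * (1 + scale) + shift, which is
# exactly what the chunk and affine lines above compute.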
| 16 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
_UpperCAmelCase = AutoencoderKL
_UpperCAmelCase = "sample"
_UpperCAmelCase = 1E-2
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : str = (32, 32)
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
return {"sample": image}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (3, 32, 32)
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (3, 32, 32)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
_lowerCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(torch_device == 'mps' ,'Gradient checkpointing skipped on MPS' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.prepare_init_args_and_inputs_for_common()
_lowerCAmelCase : List[Any] = self.model_class(**_A )
model.to(_A )
assert not model.is_gradient_checkpointing and model.training
_lowerCAmelCase : List[str] = model(**_A ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_lowerCAmelCase : Dict = torch.randn_like(_A )
_lowerCAmelCase : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_lowerCAmelCase : Optional[int] = self.model_class(**_A )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(_A )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_lowerCAmelCase : Tuple = model_a(**_A ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_lowerCAmelCase : int = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
_lowerCAmelCase : Dict = dict(model.named_parameters() )
_lowerCAmelCase : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5E-5 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' ,output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['missing_keys'] ) ,0 )
model.to(_A )
_lowerCAmelCase : Union[str, Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
_lowerCAmelCase : int = model.to(_A )
model.eval()
if torch_device == "mps":
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(0 )
_lowerCAmelCase : Dict = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
_lowerCAmelCase : List[str] = image.to(_A )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(_A ,sample_posterior=_A ,generator=_A ).sample
_lowerCAmelCase : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_lowerCAmelCase : List[Any] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
_lowerCAmelCase : Optional[Any] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
_lowerCAmelCase : Any = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(_A ,_A ,rtol=1E-2 ) )
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(_A ) for s in shape] )}.npy"""
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A=0 ,_A=(4, 3, 512, 512) ,_A=False ):
'''simple docstring'''
        _lowerCAmelCase : str = torch.float16 if fpaa else torch.float32  # half precision when the fp16 flag is set
_lowerCAmelCase : Any = torch.from_numpy(load_hf_numpy(self.get_file_format(_A ,_A ) ) ).to(_A ).to(_A )
return image
def __lowerCamelCase ( self ,_A="CompVis/stable-diffusion-v1-4" ,_A=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "fp16" if fpaa else None
        _lowerCAmelCase : str = torch.float16 if fpaa else torch.float32  # half precision when the fp16 flag is set
_lowerCAmelCase : Union[str, Any] = AutoencoderKL.from_pretrained(
_A ,subfolder='vae' ,torch_dtype=_A ,revision=_A ,)
model.to(_A ).eval()
return model
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(_A )
return torch.Generator(device=_A ).manual_seed(_A )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_sd_vae_model()
_lowerCAmelCase : List[str] = self.get_sd_image(_A )
_lowerCAmelCase : List[Any] = self.get_generator(_A )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(_A ,generator=_A ,sample_posterior=_A ).sample
assert sample.shape == image.shape
_lowerCAmelCase : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase : Optional[int] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(_A ,_A ,atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_sd_vae_model(fpaa=_A )
_lowerCAmelCase : Optional[int] = self.get_sd_image(_A ,fpaa=_A )
_lowerCAmelCase : Optional[Any] = self.get_generator(_A )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(_A ,generator=_A ,sample_posterior=_A ).sample
assert sample.shape == image.shape
_lowerCAmelCase : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase : int = torch.tensor(_A )
assert torch_all_close(_A ,_A ,atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_sd_vae_model()
_lowerCAmelCase : Dict = self.get_sd_image(_A )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(_A ).sample
assert sample.shape == image.shape
_lowerCAmelCase : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_lowerCAmelCase : List[str] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(_A ,_A ,atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_sd_vae_model()
_lowerCAmelCase : List[Any] = self.get_sd_image(_A ,shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase : str = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu()
_lowerCAmelCase : Tuple = torch.tensor(_A )
assert torch_all_close(_A ,_A ,atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_sd_vae_model(fpaa=_A )
_lowerCAmelCase : Optional[int] = self.get_sd_image(_A ,shape=(3, 4, 64, 64) ,fpaa=_A )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_lowerCAmelCase : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_lowerCAmelCase : Any = torch.tensor(_A )
assert torch_all_close(_A ,_A ,atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='xformers is not required when using PyTorch 2.0.' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_sd_vae_model(fpaa=_A )
_lowerCAmelCase : Union[str, Any] = self.get_sd_image(_A ,shape=(3, 4, 64, 64) ,fpaa=_A )
with torch.no_grad():
_lowerCAmelCase : List[str] = model.decode(_A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase : Tuple = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_A ,_A ,atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='xformers is not required when using PyTorch 2.0.' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_sd_vae_model()
_lowerCAmelCase : int = self.get_sd_image(_A ,shape=(3, 4, 64, 64) )
with torch.no_grad():
_lowerCAmelCase : int = model.decode(_A ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_lowerCAmelCase : Tuple = model.decode(_A ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(_A ,_A ,atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_sd_vae_model()
_lowerCAmelCase : List[str] = self.get_sd_image(_A )
_lowerCAmelCase : List[str] = self.get_generator(_A )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model.encode(_A ).latent_dist
_lowerCAmelCase : Tuple = dist.sample(generator=_A )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_lowerCAmelCase : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
_lowerCAmelCase : Union[str, Any] = torch.tensor(_A )
_lowerCAmelCase : Optional[Any] = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(_A ,_A ,atol=_A )
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 0 |
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def lowerCamelCase__ ( _lowerCamelCase = 100 ):
'''simple docstring'''
_lowerCAmelCase : str = 1
_lowerCAmelCase : int = 2
for i in range(2 , max_n + 1 ):
_lowerCAmelCase : Any = pre_numerator
_lowerCAmelCase : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1
_lowerCAmelCase : str = cur_numerator
_lowerCAmelCase : str = e_cont * pre_numerator + temp
return sum_digits(_lowerCamelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
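# Sanity check (not part of the original file): the numerator of the 10th convergent
# of e is 1457, so for max_n = 10 the expected digit sum is 1 + 4 + 5 + 7 == 17
# (the value quoted in the Project Euler 65 statement).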
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
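# Worked example (from the Project Euler 101 statement): for u(n) = n**3, the optimum
# polynomials fitted to the first 1, 2 and 3 terms give first incorrect terms (FITs)
# of 1, 15 and 58, summing to 74; the code above applies the same idea to the
# degree-10 generating function.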
| 16 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
while a != 0:
        _lowerCAmelCase : Tuple = b % a, a  # (a, b)
return b
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if gcd(snake_case__ , snake_case__ ) != 1:
_lowerCAmelCase : Union[str, Any] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(snake_case__ )
    _lowerCAmelCase : str = 1, 0, a  # (ua, ub, uc)
    _lowerCAmelCase : List[str] = 0, 1, m  # (va, vb, vc)
    while vc != 0:
        _lowerCAmelCase : Union[str, Any] = uc // vc  # quotient q
        _lowerCAmelCase : List[Any] = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc  # new (va, vb, vc, ua, ub, uc)
return ua % m
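# Illustrative usage (not part of the original file; find_mod_inverse is an assumed
# name for the second function above):
#   gcd(24, 40) == 8
#   find_mod_inverse(7, 26) == 15   # since 7 * 15 % 26 == 1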
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
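# Illustrative sketch (not part of the original script): add_sub_symbol marks the
# continuation characters of whole Chinese words with "##" so whole-word masking can
# treat them as one unit, e.g.
#   add_sub_symbol(["你", "好", "欢", "迎"], {"欢迎"})
#   # -> ["你", "好", "欢", "##迎"]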
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
        # We only save positions of Chinese subwords starting with "##", meaning they are part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _lowerCAmelCase : Dict = LTP(args.ltp )  # faster on a GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowerCAmelCase = """\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"""
class __UpperCamelCase ( a__ ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = parser.add_parser(
            'convert' ,help='CLI tool to convert models from original author checkpoints to Transformers PyTorch checkpoints.' ,)
train_parser.add_argument('--model_type' ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' ,type=__lowerCAmelCase ,required=__lowerCAmelCase ,help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' ,type=__lowerCAmelCase ,default='' ,help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' ,type=__lowerCAmelCase ,default=__lowerCAmelCase ,help='Optional fine-tuning task name if the TF model was a finetuned model.' ,)
train_parser.set_defaults(func=__lowerCAmelCase )
def __init__( self ,_A ,_A ,_A ,_A ,_A ,*_A ,):
'''simple docstring'''
_lowerCAmelCase : Any = logging.get_logger('transformers-cli/converting' )
self._logger.info(F"""Loading model {model_type}""" )
_lowerCAmelCase : List[str] = model_type
_lowerCAmelCase : List[Any] = tf_checkpoint
_lowerCAmelCase : Dict = pytorch_dump_output
_lowerCAmelCase : str = config
_lowerCAmelCase : str = finetuning_task_name
def __lowerCamelCase ( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCAmelCase : Tuple = self._tf_checkpoint
_lowerCAmelCase : Union[str, Any] = ''
else:
_lowerCAmelCase : List[str] = self._tf_checkpoint
_lowerCAmelCase : Union[str, Any] = ''
convert_transfo_xl_checkpoint_to_pytorch(
__lowerCAmelCase ,self._config ,self._pytorch_dump_output ,__lowerCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__lowerCAmelCase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
raise ValueError(
            '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]' )
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
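    # Two-pointer scan over a sorted (ascending) list: advance the left index
    # when the running sum is too small and retreat the right index when it is
    # too large, until the pair summing to ``target`` is found.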
_lowerCAmelCase : str = 0
_lowerCAmelCase : int = len(UpperCAmelCase__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
_lowerCAmelCase : Dict = i + 1
else:
_lowerCAmelCase : Union[str, Any] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 707 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
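    # Ascii85-encode the UTF-8 bytes of the input string.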
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
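    # Decode an Ascii85 payload back into a UTF-8 string.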
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PhobertTokenizer
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Optional[int] = ["T@@", "i", "I", "R@@", "r", "e@@"]
_lowerCAmelCase : str = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
_lowerCAmelCase : Any = ["#version: 0.2", "l à</w>"]
_lowerCAmelCase : Dict = {"unk_token": "<unk>"}
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "Tôi là VinAI Research"
_lowerCAmelCase : int = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowerCAmelCase : str = "Tôi là VinAI Research"
_lowerCAmelCase : str = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
_lowerCAmelCase : str = tokenizer.tokenize(lowerCAmelCase__ )
print(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : int = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
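        # If the serialized normalizer state disagrees with the arguments passed
        # in, rebuild the backend normalizer so lowercasing, accent stripping and
        # Chinese-character handling match the requested configuration.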
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
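        # Segment ids: 0 for the first sequence (including [CLS] and [SEP]),
        # 1 for the optional second sequence and its trailing [SEP].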
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
| 16 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
_lowerCAmelCase = OrderedDict(
[
        # Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
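        # Model for Vision-2-Seq mapping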
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
_lowerCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
_lowerCAmelCase = OrderedDict(
[
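        # Model for Next Sentence Prediction mapping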
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
_lowerCAmelCase = OrderedDict(
[
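        # Model for Speech Seq2Seq mapping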
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
_lowerCAmelCase = OrderedDict(
[
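        # Model for Audio Classification mapping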
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
) | 709 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
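        # Locate the helper scripts shipped with accelerate.test_utils so they
        # can be launched below via torchrun.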
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
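    # Each rank builds a tensor whose first dimension depends on its rank, so
    # `pad_across_processes` must pad every tensor up to the largest first
    # dimension (num_processes + 1) before the comparisons below.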
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
_UpperCAmelCase = StableDiffusionPanoramaPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Any = DDIMScheduler()
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Union[str, Any] = CLIPTextModel(UpperCAmelCase_ )
_lowerCAmelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(UpperCAmelCase_ )
_lowerCAmelCase : int = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
_lowerCAmelCase : str = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowerCAmelCase : Dict = self.get_dummy_inputs(UpperCAmelCase_ )
_lowerCAmelCase : int = sd_pipe(**UpperCAmelCase_ ).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Dict = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.25E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
_lowerCAmelCase : Tuple = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ )
_lowerCAmelCase : Dict = 'french fries'
_lowerCAmelCase : List[Any] = sd_pipe(**UpperCAmelCase_ ,negative_prompt=UpperCAmelCase_ )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Union[str, Any] = self.get_dummy_components()
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowerCAmelCase : List[str] = self.get_dummy_inputs(UpperCAmelCase_ )
_lowerCAmelCase : Dict = sd_pipe(**UpperCAmelCase_ ,view_batch_size=2 )
_lowerCAmelCase : Dict = output.images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : str = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' )
_lowerCAmelCase : Tuple = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
_lowerCAmelCase : List[Any] = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(UpperCAmelCase_ )
_lowerCAmelCase : Optional[int] = sd_pipe(**UpperCAmelCase_ ).images
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Optional[Any] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : List[str] = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = PNDMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,skip_prk_steps=UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
_lowerCAmelCase : List[str] = self.get_dummy_inputs(UpperCAmelCase_ )
_lowerCAmelCase : Tuple = sd_pipe(**UpperCAmelCase_ ).images
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Tuple = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.manual_seed(UpperCAmelCase_ )
_lowerCAmelCase : Union[str, Any] = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'stabilityai/stable-diffusion-2-base'
_lowerCAmelCase : str = DDIMScheduler.from_pretrained(UpperCAmelCase_ ,subfolder='scheduler' )
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
_lowerCAmelCase : Tuple = self.get_inputs()
_lowerCAmelCase : Any = pipe(**UpperCAmelCase_ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : List[Any] = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' ,safety_checker=UpperCAmelCase_ )
_lowerCAmelCase : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
_lowerCAmelCase : List[str] = self.get_inputs()
_lowerCAmelCase : int = pipe(**UpperCAmelCase_ ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 0
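        # The callback checks intermediate latents: the 64x256 latent grid
        # corresponds to the 512x2048-pixel panorama produced by this pipeline.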
def callback_fn(_A ,_A ,_A ) -> None:
_lowerCAmelCase : Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCAmelCase : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : List[Any] = latents[0, -3:, -3:, -1]
_lowerCAmelCase : Any = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_lowerCAmelCase : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_lowerCAmelCase : Dict = latents[0, -3:, -3:, -1]
_lowerCAmelCase : str = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_lowerCAmelCase : int = False
_lowerCAmelCase : Union[str, Any] = 'stabilityai/stable-diffusion-2-base'
_lowerCAmelCase : int = DDIMScheduler.from_pretrained(UpperCAmelCase_ ,subfolder='scheduler' )
_lowerCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ )
_lowerCAmelCase : Dict = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
_lowerCAmelCase : List[str] = self.get_inputs()
pipe(**UpperCAmelCase_ ,callback=UpperCAmelCase_ ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase : List[Any] = 'stabilityai/stable-diffusion-2-base'
_lowerCAmelCase : int = DDIMScheduler.from_pretrained(UpperCAmelCase_ ,subfolder='scheduler' )
_lowerCAmelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase_ ,scheduler=UpperCAmelCase_ ,safety_checker=UpperCAmelCase_ )
_lowerCAmelCase : str = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : str = self.get_inputs()
_lowerCAmelCase : Any = pipe(**UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 710 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
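    # Depth-first backtracking: try every unused element at position ``index``,
    # recurse on the next position, then undo the choice so sibling branches
    # can reuse the element.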
if index == len(_lowerCamelCase ):
print(_lowerCamelCase )
return
for i in range(len(_lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_lowerCAmelCase : List[str] = True
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase )
current_sequence.pop()
_lowerCAmelCase : int = False
_lowerCAmelCase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowerCAmelCase = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 16 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_lowerCAmelCase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__()
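        # Reuse a pretrained ResNet, drop its average-pool and classifier head,
        # and pool the final feature map down to ``num_image_embeds`` positions.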
_lowerCAmelCase : Dict = torchvision.models.resnetaaa(pretrained=UpperCamelCase__ )
_lowerCAmelCase : Tuple = list(model.children() )[:-2]
_lowerCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase__ )
_lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.pool(self.model(UpperCamelCase__ ) )
_lowerCAmelCase : Dict = torch.flatten(UpperCamelCase__ ,start_dim=2 )
_lowerCAmelCase : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __UpperCamelCase ( UpperCamelCase_ ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [json.loads(UpperCamelCase__ ) for l in open(UpperCamelCase__ )]
_lowerCAmelCase : Dict = os.path.dirname(UpperCamelCase__ )
_lowerCAmelCase : Any = tokenizer
_lowerCAmelCase : Tuple = labels
_lowerCAmelCase : Tuple = len(UpperCamelCase__ )
_lowerCAmelCase : Tuple = max_seq_length
_lowerCAmelCase : int = transforms
def __len__( self ):
'''simple docstring'''
return len(self.data )
def __getitem__( self ,_A ):
'''simple docstring'''
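        # Tokenize the text, split off the [CLS]/[SEP] ids, truncate the body to
        # the maximum sequence length, and build a multi-hot label vector next
        # to the transformed image.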
_lowerCAmelCase : List[str] = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] ,add_special_tokens=UpperCamelCase__ ) )
        _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = sentence[0], sentence[1:-1], sentence[-1]
_lowerCAmelCase : List[str] = sentence[: self.max_seq_length]
_lowerCAmelCase : Optional[Any] = torch.zeros(self.n_classes )
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Dict = Image.open(os.path.join(self.data_dir ,self.data[index]['img'] ) ).convert('RGB' )
_lowerCAmelCase : Any = self.transforms(UpperCamelCase__ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
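    # Right-pad every tokenized sentence in the batch to the longest length and
    # build the matching attention mask (1 for real tokens, 0 for padding).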
_lowerCAmelCase : Optional[Any] = [len(row['sentence'] ) for row in batch]
    _lowerCAmelCase, _lowerCAmelCase : List[str] = len(_lowercase ), max(_lowercase )
_lowerCAmelCase : List[str] = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
_lowerCAmelCase : int = torch.zeros(_lowercase , _lowercase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowercase , _lowercase ) ):
_lowerCAmelCase : Optional[Any] = input_row['''sentence''']
_lowerCAmelCase : str = 1
_lowerCAmelCase : Dict = torch.stack([row['image'] for row in batch] )
_lowerCAmelCase : int = torch.stack([row['label'] for row in batch] )
_lowerCAmelCase : Union[str, Any] = torch.stack([row['image_start_token'] for row in batch] )
_lowerCAmelCase : Dict = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase__ ( ):
'''simple docstring'''
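    # Resize/center-crop preprocessing; the normalization statistics appear to
    # be this example's dataset-specific means/stds (close to, but not, the
    # standard ImageNet values).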
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
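        # Gate log records: when ``main_process_only`` is set, only the main
        # process is allowed to emit the record.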
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
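        # ``main_process_only`` restricts emission to the main process, while
        # ``in_order`` lets every process log one after another, separated by
        # barriers, so interleaved output stays readable.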
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase = None ):
'''simple docstring'''
if log_level is None:
_lowerCAmelCase : Union[str, Any] = os.environ.get('ACCELERATE_LOG_LEVEL' , _lowerCamelCase )
_lowerCAmelCase : int = logging.getLogger(_lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(_lowerCamelCase , {} )
| 16 | 0 |
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
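    # Dynamic programming over the grid: after the two preprocessing passes,
    # each cell holds the minimum path cost of reaching it from the top-left.
    # preprocessing the first row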
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(__UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence
lengths greater than the model's maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda x : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
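# A minimal, self-contained check of the overlap test used in the loop above (all
# values are made up for illustration). Note that only containment is rejected;
# partially overlapping spans pass through, mirroring the condition in the code.
def _demo_overlaps(span, chosen):
    s, e = span
    return any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen)

assert _demo_overlaps((1, 5), [(2, 3)])       # candidate contains a chosen span
assert _demo_overlaps((2, 3), [(1, 5)])       # candidate lies inside a chosen span
assert not _demo_overlaps((1, 3), [(2, 5)])   # partial overlap is *not* rejected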
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass(frozen=UpperCAmelCase__ )
class __UpperCamelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = 42
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
@dataclass(frozen=UpperCAmelCase__ )
class __UpperCamelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __UpperCamelCase ( UpperCAmelCase__ ):
_UpperCAmelCase = 42
def __init__( self ,_A ,_A ,_A ,_A = None ,_A=False ,_A = False ,):
'''simple docstring'''
_lowerCAmelCase : str = hans_processors[task]()
_lowerCAmelCase : List[Any] = os.path.join(
_A ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(_A ) ,_A ,) ,)
_lowerCAmelCase : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
_lowerCAmelCase : List[str] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase : List[Any] = cached_features_file + """.lock"""
with FileLock(_A ):
if os.path.exists(_A ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
_lowerCAmelCase : Any = torch.load(_A )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
_lowerCAmelCase : Optional[Any] = (
processor.get_dev_examples(_A ) if evaluate else processor.get_train_examples(_A )
)
logger.info('Training examples: %s' ,len(_A ) )
_lowerCAmelCase : List[Any] = hans_convert_examples_to_features(_A ,_A ,_A ,_A )
logger.info('Saving features into cached file %s' ,_A )
torch.save(self.features ,_A )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,_A ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class __UpperCamelCase :
_UpperCAmelCase = 42
def __init__( self ,_A ,_A ,_A ,_A = 128 ,_A=False ,_A = False ,):
'''simple docstring'''
_lowerCAmelCase : Dict = hans_processors[task]()
_lowerCAmelCase : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
_lowerCAmelCase : Optional[Any] = label_list
_lowerCAmelCase : Tuple = processor.get_dev_examples(_A ) if evaluate else processor.get_train_examples(_A )
_lowerCAmelCase : Any = hans_convert_examples_to_features(_A ,_A ,_A ,_A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_lowerCAmelCase : int = tf.data.Dataset.from_generator(
_A ,(
{
'example_id': tf.int32,
'input_ids': tf.int32,
'attention_mask': tf.int32,
'token_type_ids': tf.int32,
},
tf.int64,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,_A ):
'''simple docstring'''
return self.features[i]
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.label_list
class __UpperCamelCase ( UpperCAmelCase__ ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(_A ,'heuristics_train_set.txt' ) ) ,'train' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(_A ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def __lowerCamelCase ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
for i, line in enumerate(_A ):
if i == 0:
continue
_lowerCAmelCase : Dict = """%s-%s""" % (set_type, line[0])
_lowerCAmelCase : List[str] = line[5]
_lowerCAmelCase : List[Any] = line[6]
_lowerCAmelCase : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
_lowerCAmelCase : Union[str, Any] = line[0]
examples.append(InputExample(guid=_A ,text_a=_A ,text_b=_A ,label=_A ,pairID=_A ) )
return examples
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : str = {label: i for i, label in enumerate(_lowerCamelCase )}
_lowerCAmelCase : List[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(_lowerCamelCase ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index) )
_lowerCAmelCase : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_lowerCamelCase , max_length=_lowerCamelCase , padding='max_length' , truncation=_lowerCamelCase , return_overflowing_tokens=_lowerCamelCase , )
_lowerCAmelCase : Union[str, Any] = label_map[example.label] if example.label in label_map else 0
_lowerCAmelCase : str = int(example.pairID )
features.append(InputFeatures(**_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
_lowerCAmelCase = {
"""hans""": 3,
}
_lowerCAmelCase = {
"""hans""": HansProcessor,
}
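# A minimal sketch of the label -> index mapping built inside
# hans_convert_examples_to_features above, using the labels that
# HansProcessor.get_labels() returns.
_demo_label_list = ["contradiction", "entailment", "neutral"]
_demo_label_map = {label: i for i, label in enumerate(_demo_label_list)}
assert _demo_label_map["entailment"] == 1
# unseen labels fall back to 0, mirroring the `if example.label in label_map else 0` branch
assert _demo_label_map.get("unknown", 0) == 0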
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNet1DModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.float16 )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
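# A minimal sketch of the device-aware seeding pattern used by get_dummy_inputs above:
# mps does not support torch.Generator(device=...), so a globally seeded generator is
# returned instead (torch is already imported at the top of this file).
def _demo_make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

_demo_generator = _demo_make_generator("cpu")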
| 16 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowercase__ ):
_UpperCAmelCase = ["""speech"""]
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
requires_backends(self ,['speech'] )
class __UpperCamelCase ( metaclass=lowercase__ ):
_UpperCAmelCase = ["""speech"""]
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
requires_backends(self ,['speech'] )
| 714 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.float16
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
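# A minimal sketch of the scheduler-interchange pattern exercised above: any of these
# multistep schedulers can be rebuilt from another's config via from_config (all four
# classes are already imported at the top of this file).
_demo_unipc = UniPCMultistepScheduler(num_train_timesteps=1000)
_demo_dpm = DPMSolverMultistepScheduler.from_config(_demo_unipc.config)
_demo_unipc_again = UniPCMultistepScheduler.from_config(_demo_dpm.config)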
| 16 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class __UpperCamelCase ( __snake_case ):
_UpperCAmelCase = 'swinv2'
_UpperCAmelCase = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self ,_A=224 ,_A=4 ,_A=3 ,_A=96 ,_A=[2, 2, 6, 2] ,_A=[3, 6, 12, 24] ,_A=7 ,_A=4.0 ,_A=True ,_A=0.0 ,_A=0.0 ,_A=0.1 ,_A="gelu" ,_A=False ,_A=0.0_2 ,_A=1E-5 ,_A=32 ,**_A ,):
'''simple docstring'''
super().__init__(**A_ )
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : str = embed_dim
_lowerCAmelCase : Dict = depths
_lowerCAmelCase : List[Any] = len(A_ )
_lowerCAmelCase : Optional[Any] = num_heads
_lowerCAmelCase : List[Any] = window_size
_lowerCAmelCase : Any = mlp_ratio
_lowerCAmelCase : List[Any] = qkv_bias
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : List[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = drop_path_rate
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[Any] = use_absolute_embeddings
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(A_ ) - 1) )
_lowerCAmelCase : Optional[Any] = (0, 0, 0, 0)
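# A hedged usage sketch, assuming this class corresponds to transformers' Swinv2Config:
# with the defaults above (embed_dim=96 and four stages), the derived hidden_size is
# 96 * 2 ** 3 = 768.
from transformers import Swinv2Config

_demo_config = Swinv2Config()
assert _demo_config.hidden_size == 96 * 2 ** 3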
| 715 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
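# A few hedged input -> output examples for the key-renaming helper above (keys are
# representative of Jukebox checkpoints, not exhaustive):
#   "prior.x_out"        -> "prior.fc_proj_out"          (via the "prior.x_out" rule)
#   "vqvae.bottleneck.k" -> "vqvae.bottleneck.codebook"  (via the trailing ".k" rule)
#   "foo.ln_1.weight"    -> "foo.layer_norm_1.weight"    (via the generic ".ln" rule)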
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key} -> {key}:\nshapes {val.shape} and {value.shape} do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(_lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
_lowerCAmelCase : Union[str, Any] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(_lowerCamelCase ) )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if len(_lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
_lowerCAmelCase : str = len(_lowerCamelCase )
_lowerCAmelCase : int = matrix_length // 2
_lowerCAmelCase : Union[str, Any] = [[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase )]
_lowerCAmelCase : List[Any] = [
[a[i][j] for j in range(_lowerCamelCase , _lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )
]
_lowerCAmelCase : Dict = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase )]
_lowerCAmelCase : Any = [[a[i][j] for j in range(_lowerCamelCase )] for i in range(_lowerCamelCase , _lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return len(_lowerCamelCase ), len(matrix[0] )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
print('\n'.join(str(_lowerCamelCase ) for line in matrix ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if matrix_dimensions(_lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = split_matrix(_lowerCamelCase )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = split_matrix(_lowerCamelCase )
_lowerCAmelCase : List[Any] = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Dict = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : int = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = actual_strassen(_lowerCamelCase , matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : Dict = actual_strassen(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : List[str] = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : str = actual_strassen(matrix_subtraction(_lowerCamelCase , _lowerCamelCase ) , matrix_addition(_lowerCamelCase , _lowerCamelCase ) )
_lowerCAmelCase : List[str] = matrix_addition(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
_lowerCAmelCase : List[str] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = matrix_addition(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = matrix_subtraction(matrix_subtraction(matrix_addition(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) , _lowerCamelCase )
# construct the new matrix from our 4 quadrants
_lowerCAmelCase : Any = []
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if matrix_dimensions(_lowerCamelCase )[1] != matrix_dimensions(_lowerCamelCase )[0]:
_lowerCAmelCase : Union[str, Any] = (
'Unable to multiply these matrices, please check the dimensions.\n'
f"""Matrix A: {matrixa}\n"""
f"""Matrix B: {matrixa}"""
)
raise Exception(_lowerCamelCase )
_lowerCAmelCase : Any = matrix_dimensions(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = matrix_dimensions(_lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
_lowerCAmelCase : Dict = max(*_lowerCamelCase , *_lowerCamelCase )
_lowerCAmelCase : List[Any] = int(math.pow(2 , math.ceil(math.log2(_lowerCamelCase ) ) ) )
_lowerCAmelCase : Any = matrixa
_lowerCAmelCase : Any = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
_lowerCAmelCase : Tuple = actual_strassen(_lowerCamelCase , _lowerCamelCase )
# Removing the additional zeros
for i in range(0 , _lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
_lowerCAmelCase = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
_lowerCAmelCase = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
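# A self-contained sketch of the classic Strassen identities for a single 2x2 block
# multiply: seven products m1..m7 replace the eight products of the schoolbook
# method. This is a plain-number illustration, independent of the helpers above.
def _demo_strassen_2x2(a, b):
    (a11, a12), (a21, a22) = a
    (b11, b12), (b21, b22) = b
    m1 = (a11 + a22) * (b11 + b22)
    m2 = (a21 + a22) * b11
    m3 = a11 * (b12 - b22)
    m4 = a22 * (b21 - b11)
    m5 = (a11 + a12) * b22
    m6 = (a21 - a11) * (b11 + b12)
    m7 = (a12 - a22) * (b21 + b22)
    return [
        [m1 + m4 - m5 + m7, m3 + m5],
        [m2 + m4, m1 - m2 + m3 + m6],
    ]

assert _demo_strassen_2x2([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]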
| 716 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
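# A minimal standalone sketch of the lazy-import idea behind _LazyModule above
# (simplified; the real implementation also handles module specs and direct
# submodule access):
import importlib

class _TinyLazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the submodule that defines `attr` only on first access
        for submodule, attrs in self._import_structure.items():
            if attr in attrs:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)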
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
    def _tokenize( self ,text ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
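    # Note (comment added): the branch above re-tokenizes pieces such as
    # "▁2014," so that a trailing comma after digits becomes its own piece,
    # e.g. ["▁2014", ","], mirroring the original XLNet preprocessing.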
    def _convert_token_to_id( self ,token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self ,index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE ,' ' ).strip()
        return out_string
    def _decode( self ,token_ids ,skip_special_tokens = False ,clean_up_tokenization_spaces = None ,spaces_between_special_tokens = True ,**kwargs ,):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer' ,False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids ,skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
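    # Illustrative layout (comment added): unlike BERT, XLNet appends its
    # special tokens, so a pair (A, B) is encoded as A + [sep] + B + [sep] + [cls].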
    def get_special_tokens_mask( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
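# A minimal usage sketch (added; not part of the original file). It assumes the
# published xlnet-base-cased checkpoint is reachable, matching the vocab URLs
# above; the sample sentence is arbitrary.
if __name__ == "__main__":
    tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
    pieces = tokenizer.tokenize("Hello, world! 1,000 examples.")
    ids = tokenizer.convert_tokens_to_ids(pieces)
    print(pieces)
    print(tokenizer.decode(ids))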
| 16 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("""~"""), """.cache""")
CACHE_DIR = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path( model_type , use_small=False ):
    '''simple docstring'''
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]['file_name'] )
def _download( from_hf_path , file_name ):
    '''simple docstring'''
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info['repo_id'] , model_info['file_name'] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint['model_args']
    if "input_vocab_size" not in model_args:
        model_args['input_vocab_size'] = model_args['vocab_size']
        model_args['output_vocab_size'] = model_args['vocab_size']
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args['num_heads'] = model_args.pop('n_head' )
    model_args['hidden_size'] = model_args.pop('n_embd' )
    model_args['num_layers'] = model_args.pop('n_layer' )
    model_config = ConfigClass(**checkpoint['model_args'] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint['model']
    # fixup checkpoint
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith('.attn.bias' )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith('.attn.bias' )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint['best_val_loss'].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
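# Illustrative key remapping performed in _load_model above (comment added;
# the sample checkpoint key is an assumption based on new_layer_name_dict):
#   "_orig_mod.transformer.h.0.attn.c_attn.weight" -> "layers.0.attn.att_proj.weight"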
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    '''simple docstring'''
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = 'cpu'  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , 'cpu' , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model['model']
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError('initial and new models don\'t have the same number of parameters' )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError('initial and new outputs don\'t have the same shape' )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError('initial and new outputs are not equal' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    '''simple docstring'''
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , 'config.json' ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , 'config.json' ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , 'config.json' ) )
    codecConfig = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained('facebook/encodec_24khz' )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
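    # Example invocation (comment added; the script filename is an assumption):
    #   python convert_suno_to_hf.py text ./bark-text-hf --is_small
    # where "text" may be replaced by "coarse" or "fine", per the argparse
    # definition above.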
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa:
    def __init__( self ,data ):
        '''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ):
        '''simple docstring'''
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' ,(len(data ) * 8) )
return data + padding + big_endian_integer
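    # Worked example (comment added): for the 11-byte message b"Test String",
    # (11 + 8) % 64 == 19, so the padding is one 0x80 byte followed by
    # 63 - 19 == 44 zero bytes, and struct.pack('>Q', 88) appends the 64-bit
    # big-endian bit length, giving exactly one 64-byte block.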
    def final_hash( self ):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ):
        '''simple docstring'''
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
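    # Example (comment added): ror(0x0000_0001, 1) == 0x8000_0000, i.e. a
    # 32-bit right rotation by one bit.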
class SHAaaaTest( unittest.TestCase ):
    def test_match_hashes( self ):
        '''simple docstring'''
        import hashlib

        data = bytes('Test String' ,'utf-8' )
        self.assertEqual(SHAaaa(data ).hash ,hashlib.sha256(data ).hexdigest() )
def main():
    '''simple docstring'''
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
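# Example invocation (added; the module filename is an assumption):
#   python sha256.py -s "Hello World!! Welcome to Cryptography"
# The printed digest matches hashlib.sha256 over the UTF-8 bytes of the string.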
| 16 | 0 |