Dataset schema (five fields per row):

    field                     dtype     observed range
    code                      string    lengths 82 to 53.2k
    code_codestyle            int64     0 to 721
    style_context             string    lengths 91 to 41.9k
    style_context_codestyle   int64     0 to 699
    label                     int64     0 to 1
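In every complete preview row below, `label` is 1 exactly when `code_codestyle` equals `style_context_codestyle`, which suggests the label marks whether the two samples share the same code style. A minimal sketch of how rows with this schema might be loaded and inspected with the `datasets` library; the identifier `org/code-style-pairs` is a placeholder assumption, not the dataset's real name:

    # Minimal sketch: load and inspect rows with this schema.
    # "org/code-style-pairs" is a placeholder identifier, not the real dataset name.
    from datasets import load_dataset

    ds = load_dataset("org/code-style-pairs", split="train")

    for row in ds.select(range(3)):
        print(
            len(row["code"]),                # string, length 82 to 53.2k
            row["code_codestyle"],           # int64, 0 to 721
            len(row["style_context"]),       # string, length 91 to 41.9k
            row["style_context_codestyle"],  # int64, 0 to 699
            row["label"],                    # int64, 0 or 1
        )

Row 1
code: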
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline _lowercase = logging.get_logger(__name__) @add_end_docstrings(_SCREAMING_SNAKE_CASE ) class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , **_lowercase ): """simple docstring""" super().__init__(**_lowercase ) if self.framework != "pt": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) # No specific FOR_XXX available yet def __call__( self , _lowercase , **_lowercase ): """simple docstring""" return super().__call__(_lowercase , **_lowercase ) def _lowercase ( self , **_lowercase ): """simple docstring""" _lowerCAmelCase = {} if "candidate_labels" in kwargs: _lowerCAmelCase = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: _lowerCAmelCase = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase ( self , _lowercase , _lowercase=None , _lowercase="This is a sound of {}." ): """simple docstring""" if isinstance(_lowercase , _lowercase ): if audio.startswith("""http://""" ) or audio.startswith("""https://""" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png _lowerCAmelCase = requests.get(_lowercase ).content else: with open(_lowercase , """rb""" ) as f: _lowerCAmelCase = f.read() if isinstance(_lowercase , _lowercase ): _lowerCAmelCase = ffmpeg_read(_lowercase , self.feature_extractor.sampling_rate ) if not isinstance(_lowercase , np.ndarray ): raise ValueError("""We expect a numpy ndarray as input""" ) if len(audio.shape ) != 1: raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" ) _lowerCAmelCase = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" ) _lowerCAmelCase = candidate_labels _lowerCAmelCase = [hypothesis_template.format(_lowercase ) for x in candidate_labels] _lowerCAmelCase = self.tokenizer(_lowercase , return_tensors=self.framework , padding=_lowercase ) _lowerCAmelCase = [text_inputs] return inputs def _lowercase ( self , _lowercase ): """simple docstring""" _lowerCAmelCase = model_inputs.pop("""candidate_labels""" ) _lowerCAmelCase = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , _lowercase ): _lowerCAmelCase = text_inputs[0] else: # Batching case. _lowerCAmelCase = text_inputs[0][0] _lowerCAmelCase = self.model(**_lowercase , **_lowercase ) _lowerCAmelCase = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_audio, } return model_outputs def _lowercase ( self , _lowercase ): """simple docstring""" _lowerCAmelCase = model_outputs.pop("""candidate_labels""" ) _lowerCAmelCase = model_outputs["""logits"""][0] if self.framework == "pt": _lowerCAmelCase = logits.softmax(dim=0 ) _lowerCAmelCase = probs.tolist() else: raise ValueError("""`tf` framework not supported.""" ) _lowerCAmelCase = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(_lowercase , _lowercase ) , key=lambda _lowercase : -x[0] ) ] return result
code_codestyle: 5

style_context:
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class UpperCAmelCase ( metaclass=_snake_case ):
    UpperCAmelCase = ["note_seq"]

    def __init__( self : List[str] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ):
        requires_backends(self , ['''note_seq'''] )

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ):
        requires_backends(cls , ['''note_seq'''] )

    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls : str , *__lowerCamelCase : Any , **__lowerCamelCase : Optional[Any] ):
        requires_backends(cls , ['''note_seq'''] )
style_context_codestyle: 467
label: 0

Row 2
code:
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _a = OpenAIGPTTokenizer _a = OpenAIGPTTokenizerFast _a = True _a = False def __A ( self ) -> str: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __magic_name__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] __magic_name__ = dict(zip(A , range(len(A ) ) ) ) __magic_name__ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(A ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(A ) ) def __A ( self , A ) -> str: '''simple docstring''' return "lower newer", "lower newer" def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) __magic_name__ = '''lower''' __magic_name__ = ['''low''', '''er</w>'''] __magic_name__ = tokenizer.tokenize(A ) self.assertListEqual(A , A ) __magic_name__ = tokens + ['''<unk>'''] __magic_name__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def __A ( self , A=15 ) -> List[str]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): __magic_name__ = self.rust_tokenizer_class.from_pretrained(A , **A ) # Simple input __magic_name__ = '''This is a simple input''' __magic_name__ = ['''This is a simple input 1''', '''This is a simple input 2'''] __magic_name__ = ('''This is a simple input''', '''This is a pair''') __magic_name__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' ) # Simple input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' ) # Simple input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , ) # Pair input self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' ) # Pair input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' ) # Pair input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , ) def __A ( self ) -> Dict: '''simple docstring''' pass @require_ftfy @require_spacy @require_tokenizers class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" pass
code_codestyle: 678

style_context:
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = []
    __magic_name__ = 1
    while len(snake_case_ ) < 1E6:
        constant.append(str(snake_case_ ) )
        i += 1
    __magic_name__ = ''''''.join(snake_case_ )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 678
label: 1

Row 3
code:
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass


lowerCAmelCase__ = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCAmelCase__ = (4, 6, 2, 0, 8, 1_0, 3, -2)


@dataclass
class a__ :
    """simple docstring"""
    __lowerCamelCase = 42
    __lowerCamelCase = 42


class a__ :
    """simple docstring"""

    def __init__( self , lowercase ) -> None:
        '''simple docstring'''
        A__ = None
        for i in sorted(lowercase , reverse=lowercase ):
            A__ = Node(lowercase , self.head )

    def __iter__( self ) -> Iterator[int]:
        '''simple docstring'''
        A__ = self.head
        while node:
            yield node.data
            A__ = node.next_node

    def __len__( self ) -> int:
        '''simple docstring'''
        return sum(1 for _ in self )

    def __str__( self ) -> str:
        '''simple docstring'''
        return " -> ".join([str(lowercase ) for node in self] )


def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple , SCREAMING_SNAKE_CASE_: Optional[Any] ) -> Tuple:
    '''simple docstring'''
    return SortedLinkedList(list(__A ) + list(__A ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    lowerCAmelCase__ = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
code_codestyle: 514

style_context:
'''simple docstring''' import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def lowerCAmelCase (__A , __A , __A): """simple docstring""" if isinstance(__A , torch.Tensor): return image elif isinstance(__A , PIL.Image.Image): _a = [image] if isinstance(image[0] , PIL.Image.Image): _a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image] _a = np.concatenate(__A , axis=0) _a = np.array(__A).astype(np.floataa) / 2_55.0 _a = image.transpose(0 , 3 , 1 , 2) _a = 2.0 * image - 1.0 _a = torch.from_numpy(__A) elif isinstance(image[0] , torch.Tensor): _a = torch.cat(__A , dim=0) return image def lowerCAmelCase (__A , __A , __A , __A=0.99_95): """simple docstring""" if not isinstance(__A , np.ndarray): _a = True _a = va.device _a = va.cpu().numpy() _a = va.cpu().numpy() _a = np.sum(va * va / (np.linalg.norm(__A) * np.linalg.norm(__A))) if np.abs(__A) > DOT_THRESHOLD: _a = (1 - t) * va + t * va else: _a = np.arccos(__A) _a = np.sin(__A) _a = theta_a * t _a = np.sin(__A) _a = np.sin(theta_a - theta_t) / sin_theta_a _a = sin_theta_t / sin_theta_a _a = sa * va + sa * va if inputs_are_torch: _a = torch.from_numpy(__A).to(__A) return va def lowerCAmelCase (__A , __A): """simple docstring""" _a = F.normalize(__A , dim=-1) _a = F.normalize(__A , dim=-1) return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) def lowerCAmelCase (__A , __A): """simple docstring""" for param in model.parameters(): _a = value class __A ( A ): '''simple docstring''' def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str: """simple docstring""" super().__init__() self.register_modules( vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , ) _a = ( feature_extractor.size if isinstance(feature_extractor.size , A ) else feature_extractor.size['''shortest_edge'''] ) _a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , A ) set_requires_grad(self.clip_model , A ) def a__ (self , A = "auto" ) -> Union[str, Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _a = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def a__ (self ) -> Optional[Any]: """simple docstring""" self.enable_attention_slicing(A ) def a__ (self ) -> int: """simple docstring""" set_requires_grad(self.vae , A ) def a__ (self ) -> Union[str, Any]: """simple docstring""" set_requires_grad(self.vae , A ) def a__ (self ) -> Dict: """simple docstring""" set_requires_grad(self.unet , A ) def a__ (self ) -> str: """simple docstring""" set_requires_grad(self.unet , A ) def a__ (self , A , A , A ) -> Optional[Any]: """simple docstring""" _a = min(int(num_inference_steps * strength ) , A ) _a = max(num_inference_steps - init_timestep , 0 ) _a = self.scheduler.timesteps[t_start:] return timesteps, 
num_inference_steps - t_start def a__ (self , A , A , A , A , A , A=None ) -> List[str]: """simple docstring""" if not isinstance(A , torch.Tensor ): raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' ) _a = image.to(device=A , dtype=A ) if isinstance(A , A ): _a = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A ) ] _a = torch.cat(A , dim=0 ) else: _a = self.vae.encode(A ).latent_dist.sample(A ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a = 0.18215 * init_latents _a = init_latents.repeat_interleave(A , dim=0 ) _a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A ) # get latents _a = self.scheduler.add_noise(A , A , A ) _a = init_latents return latents def a__ (self , A ) -> Tuple: """simple docstring""" _a = self.coca_transform(A ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _a = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' ) def a__ (self , A , A ) -> List[Any]: """simple docstring""" _a = self.feature_extractor.preprocess(A ) _a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half() _a = self.clip_model.get_image_features(A ) _a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A ) _a = image_embeddings_clip.repeat_interleave(A , dim=0 ) return image_embeddings_clip @torch.enable_grad() def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]: """simple docstring""" _a = latents.detach().requires_grad_() _a = self.scheduler.scale_model_input(A , A ) # predict the noise residual _a = self.unet(A , A , encoder_hidden_states=A ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _a = self.scheduler.alphas_cumprod[timestep] _a = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _a = torch.sqrt(A ) _a = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , A ): _a = self.scheduler.sigmas[index] _a = latents - sigma * noise_pred else: raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a = 1 / 0.18215 * sample _a = self.vae.decode(A ).sample _a = (image / 2 + 0.5).clamp(0 , 1 ) _a = transforms.Resize(self.feature_extractor_size )(A ) _a = self.normalize(A ).to(latents.dtype ) _a = self.clip_model.get_image_features(A ) _a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A ) _a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale _a = -torch.autograd.grad(A , A )[0] if isinstance(self.scheduler , A ): _a = latents.detach() + grads * (sigma**2) _a = noise_pred_original else: _a = noise_pred_original - torch.sqrt(A ) * grads return noise_pred, latents @torch.no_grad() def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str: """simple docstring""" if isinstance(A , A ) and len(A ) != batch_size: raise 
ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if isinstance(A , torch.Generator ) and batch_size > 1: _a = [generator] + [None] * (batch_size - 1) _a = [ ('''model''', self.coca_model is None), ('''tokenizer''', self.coca_tokenizer is None), ('''transform''', self.coca_transform is None), ] _a = [x[0] for x in coca_is_none if x[1]] _a = ''', '''.join(A ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(A ): raise ValueError( f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.''' f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) _a = self.get_image_description(A ) if style_prompt is None: if len(A ): raise ValueError( f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.''' f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' ) _a = self.get_image_description(A ) # get prompt text embeddings for content and style _a = self.tokenizer( A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , ) _a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _a = self.tokenizer( A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , ) _a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _a = slerp(A , A , A ) # duplicate text embeddings for each generation per prompt _a = text_embeddings.repeat_interleave(A , dim=0 ) # set timesteps _a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _a = {} if accepts_offset: _a = 1 self.scheduler.set_timesteps(A , **A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _a , _a = self.get_timesteps(A , A , self.device ) _a = timesteps[:1].repeat(A ) # Preprocess image _a = preprocess(A , A , A ) _a = self.prepare_latents( A , A , A , text_embeddings.dtype , self.device , A ) _a = preprocess(A , A , A ) _a = self.prepare_latents( A , A , A , text_embeddings.dtype , self.device , A ) _a = slerp(A , A , A ) if clip_guidance_scale > 0: _a = self.get_clip_image_embeddings(A , A ) _a = self.get_clip_image_embeddings(A , A ) _a = slerp( A , A , A ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. _a = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _a = content_text_input.input_ids.shape[-1] _a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' ) _a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _a = uncond_embeddings.repeat_interleave(A , dim=0 ) # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _a = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. _a = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _a = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to( self.device ) else: _a = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) _a = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _a = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _a = {} if accepts_eta: _a = eta # check if the scheduler accepts generator _a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _a = generator with self.progress_bar(total=A ): for i, t in enumerate(A ): # expand the latents if we are doing classifier free guidance _a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _a = self.scheduler.scale_model_input(A , A ) # predict the noise residual _a = self.unet(A , A , encoder_hidden_states=A ).sample # perform classifier free guidance if do_classifier_free_guidance: _a , _a = noise_pred.chunk(2 ) _a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _a = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _a , _a = self.cond_fn( A , A , A , A , A , A , A , ) # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step(A , A , A , **A ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _a = 1 / 0.18215 * latents _a = self.vae.decode(A ).sample _a = (image / 2 + 0.5).clamp(0 , 1 ) _a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _a = self.numpy_to_pil(A ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
style_context_codestyle: 11
label: 0

Row 4
code:
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__lowerCAmelCase : List[Any] = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )
else:
    import sys

    __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 716

style_context:
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_ ( metaclass=_A ):
    '''simple docstring'''
    a__ = ["""note_seq"""]

    def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ["""note_seq"""] )

    @classmethod
    def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict:
        """simple docstring"""
        requires_backends(cls , ["""note_seq"""] )

    @classmethod
    def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int:
        """simple docstring"""
        requires_backends(cls , ["""note_seq"""] )
style_context_codestyle: 76
label: 0

Row 5
code:
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): UpperCAmelCase = StableDiffusionInpaintPipeline UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase = frozenset([] ) def __UpperCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_a , ) _SCREAMING_SNAKE_CASE =PNDMScheduler(skip_prk_steps=_a ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) _SCREAMING_SNAKE_CASE =CLIPTextModel(_a ) _SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _SCREAMING_SNAKE_CASE ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __UpperCamelCase ( self : Tuple , _a : List[Any] , _a : Any=0 ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a ) _SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((64, 64) ) _SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) ) if str(_a ).startswith('''mps''' ): _SCREAMING_SNAKE_CASE =torch.manual_seed(_a ) else: _SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a ) _SCREAMING_SNAKE_CASE ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE 
='''cpu''' # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE =self.get_dummy_components() _SCREAMING_SNAKE_CASE =StableDiffusionInpaintPipeline(**_a ) _SCREAMING_SNAKE_CASE =sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) _SCREAMING_SNAKE_CASE =self.get_dummy_inputs(_a ) _SCREAMING_SNAKE_CASE =sd_pipe(**_a ).images _SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _SCREAMING_SNAKE_CASE =np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Any ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _SCREAMING_SNAKE_CASE =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) _SCREAMING_SNAKE_CASE ='''stabilityai/stable-diffusion-2-inpainting''' _SCREAMING_SNAKE_CASE =StableDiffusionInpaintPipeline.from_pretrained(_a , safety_checker=_a ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE ='''Face of a yellow cat, high resolution, sitting on a park bench''' _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , ) _SCREAMING_SNAKE_CASE =output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 9E-3 def __UpperCamelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _SCREAMING_SNAKE_CASE =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) _SCREAMING_SNAKE_CASE ='''stabilityai/stable-diffusion-2-inpainting''' _SCREAMING_SNAKE_CASE =StableDiffusionInpaintPipeline.from_pretrained( _a , torch_dtype=torch.floataa , safety_checker=_a , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE ='''Face of a yellow cat, high resolution, sitting on a park bench''' _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , output_type='''np''' , ) _SCREAMING_SNAKE_CASE =output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def __UpperCamelCase ( self : List[str] ) -> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() 
torch.cuda.reset_peak_memory_stats() _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) _SCREAMING_SNAKE_CASE =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) _SCREAMING_SNAKE_CASE ='''stabilityai/stable-diffusion-2-inpainting''' _SCREAMING_SNAKE_CASE =PNDMScheduler.from_pretrained(_a , subfolder='''scheduler''' ) _SCREAMING_SNAKE_CASE =StableDiffusionInpaintPipeline.from_pretrained( _a , safety_checker=_a , scheduler=_a , torch_dtype=torch.floataa , ) pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _SCREAMING_SNAKE_CASE ='''Face of a yellow cat, high resolution, sitting on a park bench''' _SCREAMING_SNAKE_CASE =torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE =pipe( prompt=_a , image=_a , mask_image=_a , generator=_a , num_inference_steps=2 , output_type='''np''' , ) _SCREAMING_SNAKE_CASE =torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
code_codestyle: 691

style_context:
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process snake_case_ : str = logging.getLogger(__name__) def lowerCamelCase( a__ ,a__): return (preds == labels).mean() @dataclass class A__ : UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCAmelCase = field( default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCAmelCase = field( default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCAmelCase = field( default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class A__ : UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} ) UpperCAmelCase = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCAmelCase = field( default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowerCamelCase( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. 
Use" ''' --overwrite_output_dir to overcome.''') # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' ,a__) # Set seed set_seed(training_args.seed) try: _SCREAMING_SNAKE_CASE =processors[data_args.task_name]() _SCREAMING_SNAKE_CASE =processor.get_labels() _SCREAMING_SNAKE_CASE =len(a__) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,) _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) _SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,) # Get datasets _SCREAMING_SNAKE_CASE =( MultipleChoiceDataset( data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,) if training_args.do_train else None ) _SCREAMING_SNAKE_CASE =( MultipleChoiceDataset( data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,) if training_args.do_eval else None ) def compute_metrics(a__) -> Dict: _SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1) return {"acc": simple_accuracy(a__ ,p.label_ids)} # Data collator _SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None # Initialize our Trainer _SCREAMING_SNAKE_CASE =Trainer( model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation _SCREAMING_SNAKE_CASE ={} if training_args.do_eval: logger.info('''*** Evaluate ***''') _SCREAMING_SNAKE_CASE =trainer.evaluate() _SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''') if trainer.is_world_master(): with open(a__ ,'''w''') as writer: logger.info('''***** Eval 
results *****''') for key, value in result.items(): logger.info(''' %s = %s''' ,a__ ,a__) writer.write('''%s = %s\n''' % (key, value)) results.update(a__) return results def lowerCamelCase( a__): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
style_context_codestyle: 691
label: 1

Row 6
code:
def A_ ( snake_case : int = 100 ) -> int:
    '''simple docstring'''
    __UpperCamelCase = (n * (n + 1) // 2) ** 2
    __UpperCamelCase = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(F"{solution() = }")
code_codestyle: 715

style_context:
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer


def A_ ( snake_case : str , snake_case : str , **snake_case : List[str] ) -> Dict:
    '''simple docstring'''
    __UpperCamelCase = AutoConfig.from_pretrained(snake_case , **snake_case )
    __UpperCamelCase = AutoModelForSeqaSeqLM.from_config(snake_case )
    model.save_pretrained(snake_case )
    AutoTokenizer.from_pretrained(snake_case ).save_pretrained(snake_case )
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
style_context_codestyle: 451
label: 0

Row 7
code:
from __future__ import annotations lowercase_ = 'Muhammad Umer Farooq' lowercase_ = 'MIT' lowercase_ = '1.0.0' lowercase_ = 'Muhammad Umer Farooq' lowercase_ = 'contact@muhammadumerfarooq.me' lowercase_ = 'Alpha' import re from html.parser import HTMLParser from urllib import parse import requests class A_ ( __UpperCamelCase ): '''simple docstring''' def __init__( self: Optional[Any] , a: str ): super().__init__() __lowerCamelCase : list[str] = [] __lowerCamelCase : Any = domain def _snake_case ( self: Optional[Any] , a: str , a: list[tuple[str, str | None]] ): # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __lowerCamelCase : int = parse.urljoin(self.domain , a ) self.urls.append(a ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE__ ).split('.' )[-2:] ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): return parse.urlparse(SCREAMING_SNAKE_CASE__ ).netloc def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ = "https://github.com" ): __lowerCamelCase : Optional[int] = get_domain_name(SCREAMING_SNAKE_CASE__ ) # Initialize the parser __lowerCamelCase : str = Parser(SCREAMING_SNAKE_CASE__ ) try: # Open URL __lowerCamelCase : List[Any] = requests.get(SCREAMING_SNAKE_CASE__ ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __lowerCamelCase : str = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __lowerCamelCase : str = requests.get(SCREAMING_SNAKE_CASE__ ) # Get the valid email. __lowerCamelCase : str = re.findall('[a-zA-Z0-9]+@' + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(SCREAMING_SNAKE_CASE__ ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = emails_from_url('https://github.com') print(F"""{len(emails)} emails found:""") print('\n'.join(sorted(emails)))
code_codestyle: 669

style_context:
import numpy as np class A_ : '''simple docstring''' def __init__( self: Optional[int] ): __lowerCamelCase : int = (0, 0) __lowerCamelCase : List[str] = None __lowerCamelCase : int = 0 __lowerCamelCase : int = 0 __lowerCamelCase : Union[str, Any] = 0 def __eq__( self: Optional[int] , a: List[Any] ): return self.position == cell.position def _snake_case ( self: Any ): print(self.position ) class A_ : '''simple docstring''' def __init__( self: str , a: List[str]=(5, 5) ): __lowerCamelCase : Optional[Any] = np.zeros(a ) __lowerCamelCase : List[str] = world_size[0] __lowerCamelCase : Optional[int] = world_size[1] def _snake_case ( self: List[Any] ): print(self.w ) def _snake_case ( self: Optional[int] , a: str ): __lowerCamelCase : Tuple = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] __lowerCamelCase : Optional[int] = cell.position[0] __lowerCamelCase : List[str] = cell.position[1] __lowerCamelCase : Dict = [] for n in neughbour_cord: __lowerCamelCase : Dict = current_x + n[0] __lowerCamelCase : Optional[Any] = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: __lowerCamelCase : Optional[Any] = Cell() __lowerCamelCase : Any = (x, y) __lowerCamelCase : Dict = cell neighbours.append(a ) return neighbours def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : str = [] __lowerCamelCase : int = [] _open.append(SCREAMING_SNAKE_CASE__ ) while _open: __lowerCamelCase : Union[str, Any] = np.argmin([n.f for n in _open] ) __lowerCamelCase : int = _open[min_f] _closed.append(_open.pop(SCREAMING_SNAKE_CASE__ ) ) if current == goal: break for n in world.get_neigbours(SCREAMING_SNAKE_CASE__ ): for c in _closed: if c == n: continue __lowerCamelCase : Optional[int] = current.g + 1 __lowerCamelCase , __lowerCamelCase : int = n.position __lowerCamelCase , __lowerCamelCase : Tuple = goal.position __lowerCamelCase : Dict = (ya - ya) ** 2 + (xa - xa) ** 2 __lowerCamelCase : str = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Optional[int] = [] while current.parent is not None: path.append(current.position ) __lowerCamelCase : int = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": lowercase_ = Gridworld() # Start position and goal lowercase_ = Cell() lowercase_ = (0, 0) lowercase_ = Cell() lowercase_ = (4, 4) print(F"""path from {start.position} to {goal.position}""") lowercase_ = astar(world, start, goal) # Just for visual reasons. for i in s: lowercase_ = 1 print(world.w)
style_context_codestyle: 669
label: 1

Row 8
code:
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A =get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__ ( __UpperCamelCase ,unittest.TestCase ): '''simple docstring''' UpperCamelCase = XLNetTokenizer UpperCamelCase = XLNetTokenizerFast UpperCamelCase = True UpperCamelCase = True def snake_case__ ( self : str ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Dict = XLNetTokenizer(a_ , keep_accents=a_ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = '''<s>''' __UpperCAmelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def snake_case__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(a_ ) , 10_06 ) def snake_case__ ( self : List[str] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def snake_case__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : str = XLNetTokenizer(a_ , keep_accents=a_ ) __UpperCAmelCase : Any = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(a_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [2_85, 46, 10, 1_70, 3_82] ) __UpperCAmelCase : Any = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __UpperCAmelCase : Dict = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual(a_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) __UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(a_ ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = XLNetTokenizer(a_ , do_lower_case=a_ ) __UpperCAmelCase : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE 
+ '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def snake_case__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Dict = XLNetTokenizer(a_ , do_lower_case=a_ ) __UpperCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( a_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def snake_case__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Tuple = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) __UpperCAmelCase : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=a_ ) __UpperCAmelCase : Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a_ ) __UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(a_ ) __UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def snake_case__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
code_codestyle: 241

style_context:
from __future__ import annotations

import copy
import inspect
import unittest

import numpy as np

from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMvaConfig,
        TFLayoutLMvaForQuestionAnswering,
        TFLayoutLMvaForSequenceClassification,
        TFLayoutLMvaForTokenClassification,
        TFLayoutLMvaModel,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=10_00,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -1_00
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 1_99, 7_68)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
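# --- Illustrative usage (not part of the original test file) -----------------
# The slow integration test above maps onto direct API usage roughly as follows;
# this sketch assumes network access to the `microsoft/layoutlmv3-base`
# checkpoint and that the TF and vision extras are installed.
#
#     model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
#     processor = LayoutLMvaImageProcessor(apply_ocr=False)
#     pixel_values = processor(images=prepare_img(), return_tensors="tf").pixel_values
#     outputs = model(input_ids=tf.constant([[1, 2]]), pixel_values=pixel_values)
#     print(outputs.last_hidden_state.shape)  # (1, 199, 768) for the base model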
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2
            --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 1_00)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2
            --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_ta_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2
            --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2
            --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2
            --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
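# Illustrative note (not part of the original module): the _LazyModule wiring
# above defers the heavy torch-dependent imports until an attribute is first
# accessed, so
#
#     from transformers.models.autoformer import AutoformerConfig
#     config = AutoformerConfig(prediction_length=24)  # kwarg assumed for illustration
#
# only triggers the import of `configuration_autoformer`, not the modeling file.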
from collections.abc import Generator
from math import sin


def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67_452_301
    ba = 0xef_cda_b89
    ca = 0x98_bad_cfe
    da = 0x10_325_476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
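if __name__ == "__main__":
    # Illustrative cross-check (not part of the original file): the pure-Python
    # digest above should agree with hashlib's C implementation byte for byte.
    import hashlib

    sample = b"The quick brown fox jumps over the lazy dog"
    print(md5_me(sample))
    print(hashlib.md5(sample).hexdigest().encode("utf-8"))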
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
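if __name__ == "__main__":
    # Illustrative usage (not part of the original file): the three variants agree.
    print(sum_of_digits(12345), sum_of_digits_recursion(12345), sum_of_digits_compact(12345))  # 15 15 15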
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
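if __name__ == "__main__":
    # Illustrative usage (not part of the original file): pad a short sequence to a
    # block and derive the matching attention mask, assuming pad_token_id = 0.
    padded = fit_to_block_size([101, 7592, 102], 8, 0)
    print(padded)                               # [101, 7592, 102, 0, 0, 0, 0, 0]
    print(build_mask(torch.tensor(padded), 0))  # tensor([1, 1, 1, 0, 0, 0, 0, 0])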
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
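# Illustrative usage (not part of the original file); assumes a local Spark
# session is available:
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.range(100).withColumnRenamed("id", "value")
#     ds = SparkDatasetReader(df, streaming=False).read()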
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCAmelCase__ ( ) -> int: A = { 'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'], 'path': ['test_1.py', 'test_2.py', 'unit_test.py'], 'content': ['a ' * 20, 'a ' * 30, 'b ' * 7], } A = Dataset.from_dict(lowerCamelCase__ ) return dataset class UpperCAmelCase__ ( UpperCamelCase__ ): def A_ ( self : str ) -> Optional[int]: '''simple docstring''' A = get_dataset() A = make_duplicate_clusters(snake_case , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A = get_dataset() A , A = deduplicate_dataset(snake_case ) self.assertEqual(len(snake_case ) , 2 ) print(snake_case ) self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 ) self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , snake_case )
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features A = logging.get_logger(__name__) A = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) A = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCAmelCase__ : lowerCAmelCase_ : str = field( default=UpperCamelCase ,metadata={"""help""": """Model type selected in the list: """ + """, """.join(UpperCamelCase )} ) lowerCAmelCase_ : str = field( default=UpperCamelCase ,metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) lowerCAmelCase_ : int = field( default=1_28 ,metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } ,) lowerCAmelCase_ : int = field( default=1_28 ,metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} ,) lowerCAmelCase_ : int = field( default=64 ,metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } ,) lowerCAmelCase_ : int = field( default=30 ,metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } ,) lowerCAmelCase_ : bool = field( default=UpperCamelCase ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) lowerCAmelCase_ : bool = field( default=UpperCamelCase ,metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) lowerCAmelCase_ : float = field( default=0.0 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) lowerCAmelCase_ : int = field( default=20 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) lowerCAmelCase_ : int = field( default=0 ,metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } ,) lowerCAmelCase_ : int = field(default=1 ,metadata={"""help""": """multiple threads for converting example to features"""} ) class UpperCAmelCase__ ( UpperCamelCase ): lowerCAmelCase_ : str = """train""" lowerCAmelCase_ : Union[str, Any] = """dev""" class UpperCAmelCase__ ( UpperCamelCase ): lowerCAmelCase_ : SquadDataTrainingArguments lowerCAmelCase_ : List[SquadFeatures] lowerCAmelCase_ : Split lowerCAmelCase_ : bool def __init__( self : List[Any] , snake_case : SquadDataTrainingArguments , snake_case : PreTrainedTokenizer , snake_case : Optional[int] = None , snake_case : Union[str, Split] = Split.train , snake_case : Optional[bool] = False , snake_case : Optional[str] = None , snake_case : Optional[str] = "pt" , ) -> Optional[Any]: '''simple docstring''' A = args A = is_language_sensitive A = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(snake_case , snake_case ): try: A = 
Split[mode] except KeyError: raise KeyError('mode is not a valid split name' ) A = mode # Load data features from cache or dataset file A = 'v2' if args.version_2_with_negative else 'v1' A = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. A = cached_features_file + '.lock' with FileLock(snake_case ): if os.path.exists(snake_case ) and not args.overwrite_cache: A = time.time() A = torch.load(snake_case ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. A = self.old_features['features'] A = self.old_features.get('dataset' , snake_case ) A = self.old_features.get('examples' , snake_case ) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in""" ' future run' ) else: if mode == Split.dev: A = self.processor.get_dev_examples(args.data_dir ) else: A = self.processor.get_train_examples(args.data_dir ) A , A = squad_convert_examples_to_features( examples=self.examples , tokenizer=snake_case , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=snake_case , ) A = time.time() torch.save( {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , snake_case , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self : Any ) -> Dict: '''simple docstring''' return len(self.features ) def __getitem__( self : Union[str, Any] , snake_case : List[str] ) -> Dict[str, torch.Tensor]: '''simple docstring''' A = self.features[i] A = torch.tensor(feature.input_ids , dtype=torch.long ) A = torch.tensor(feature.attention_mask , dtype=torch.long ) A = torch.tensor(feature.token_type_ids , dtype=torch.long ) A = torch.tensor(feature.cls_index , dtype=torch.long ) A = torch.tensor(feature.p_mask , dtype=torch.float ) A = torch.tensor(feature.is_impossible , dtype=torch.float ) A = { 'input_ids': input_ids, 'attention_mask': attention_mask, 'token_type_ids': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'cls_index': cls_index, 'p_mask': p_mask} ) if self.args.version_2_with_negative: inputs.update({'is_impossible': is_impossible} ) if self.is_language_sensitive: inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: A = torch.tensor(feature.start_position , dtype=torch.long ) A = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({'start_positions': start_positions, 'end_positions': end_positions} ) return inputs
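# Illustrative usage (not part of the original file); assumes a directory with
# SQuAD-format train/dev json files:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#     train_dataset = SquadDataset(args, tokenizer, mode="train")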
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
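# Illustrative subclass (not part of the original file): concrete commands
# implement the two hooks; the exact wiring of `register_subcommand` (it is
# usually handed a subparsers action) is assumed here.
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             env_parser = parser.add_parser("env")
#             env_parser.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info would be printed here")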
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor


def get_config(checkpoint_url):
    config = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config


def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 1_26 if "Jpeg" in checkpoint_url else 2_56
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12])
        expected_slice = torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24])
        expected_slice = torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 10_24, 10_24])
        expected_slice = torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 5_12, 5_12])
        expected_slice = torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 10_24, 10_24])
        expected_slice = torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
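# Illustrative invocation (not part of the original file); the script's default
# checkpoint URL already points at the classical x2 model, so only the output
# directory is strictly required:
#
#     python convert_swin2sr_original_to_pytorch.py \
#         --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#         --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64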
def solution(n: int = 1_0_0) -> int:
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
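if __name__ == "__main__":
    # Worked example (not part of the original file): for n = 10 the square of
    # the sum is 55**2 == 3025 and the sum of the squares is 385, so the
    # difference is 2640.
    print(solution(10))  # 2640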
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            2_4_0 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (2_4_0 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
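if __name__ == "__main__":
    # Illustrative usage (not part of the original file); `casimir_force` is the
    # name assumed in this reconstruction. Solve for the force between two
    # 4 cm^2 plates separated by one micrometre.
    print(casimir_force(force=0, area=4e-4, distance=1e-6))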
from __future__ import annotations


def average(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
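if __name__ == "__main__":
    # Illustrative usage (not part of the original file).
    print(average([2, 4, 6, 8]))  # 5.0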
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() A_ : Tuple = logging.get_logger(__name__) A_ : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } A_ : int = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def snake_case (UpperCAmelCase__ ) -> str: UpperCamelCase_: Tuple = {} with open(UpperCAmelCase__ , 'r' ) as file: for line_number, line in enumerate(UpperCAmelCase__ ): UpperCamelCase_: List[Any] = line.strip() if line: UpperCamelCase_: List[Any] = line.split() UpperCamelCase_: Optional[Any] = line_number UpperCamelCase_: Any = words[0] UpperCamelCase_: List[Any] = value return result def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: for attribute in key.split('.' ): UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase__ ): UpperCamelCase_: Any = PARAM_MAPPING[full_name.split('.' )[-1]] UpperCamelCase_: Dict = 'param' if weight_type is not None and weight_type != "param": UpperCamelCase_: Optional[Any] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape elif weight_type is not None and weight_type == "param": UpperCamelCase_: Optional[Any] = hf_pointer for attribute in hf_param_name.split('.' ): UpperCamelCase_: str = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: Tuple = shape_pointer.shape # let's reduce dimension UpperCamelCase_: int = value[0] else: UpperCamelCase_: Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase_: Optional[int] = value elif weight_type == "weight_g": UpperCamelCase_: Any = value elif weight_type == "weight_v": UpperCamelCase_: Union[str, Any] = value elif weight_type == "bias": UpperCamelCase_: Union[str, Any] = value elif weight_type == "param": for attribute in hf_param_name.split('.' 
): UpperCamelCase_: Dict = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: Optional[Any] = value else: UpperCamelCase_: int = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: UpperCamelCase_: Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase__ ): UpperCamelCase_: Dict = PARAM_MAPPING[full_name.split('.' )[-1]] UpperCamelCase_: List[Any] = 'param' if weight_type is not None and weight_type != "param": UpperCamelCase_: List[Any] = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCamelCase_: Any = '.'.join([key, hf_param_name] ) else: UpperCamelCase_: Union[str, Any] = key UpperCamelCase_: Any = value if 'lm_head' in full_key else value[0] A_ : str = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None ) -> Any: UpperCamelCase_: Optional[int] = False for key, mapped_key in MAPPING.items(): UpperCamelCase_: Tuple = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: UpperCamelCase_: Optional[Any] = True if "*" in mapped_key: UpperCamelCase_: Optional[int] = name.split(UpperCAmelCase__ )[0].split('.' )[-2] UpperCamelCase_: Any = mapped_key.replace('*' , UpperCAmelCase__ ) if "weight_g" in name: UpperCamelCase_: Union[str, Any] = 'weight_g' elif "weight_v" in name: UpperCamelCase_: Dict = 'weight_v' elif "bias" in name: UpperCamelCase_: int = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase_: str = 'weight' else: UpperCamelCase_: Union[str, Any] = None if hf_dict is not None: rename_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) else: set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return is_used return is_used def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]: UpperCamelCase_: List[Any] = [] UpperCamelCase_: Dict = fairseq_model.state_dict() UpperCamelCase_: Optional[Any] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase_: Union[str, Any] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase_: List[Any] = True else: UpperCamelCase_: Tuple = load_wavaveca_layer(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if not is_used: unused_weights.append(UpperCAmelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any: UpperCamelCase_: Any = full_name.split('conv_layers.' )[-1] UpperCamelCase_: int = name.split('.' 
) UpperCamelCase_: int = int(items[0] ) UpperCamelCase_: Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase_: Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase_: int = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase_: Union[str, Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase_: List[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase__ ) @torch.no_grad() def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=False ) -> Dict: if config_path is not None: UpperCamelCase_: Tuple = WavaVecaConfig.from_pretrained(UpperCAmelCase__ ) else: UpperCamelCase_: List[str] = WavaVecaConfig() if is_seq_class: UpperCamelCase_: int = read_txt_into_dict(UpperCAmelCase__ ) UpperCamelCase_: Tuple = idalabel UpperCamelCase_: str = WavaVecaForSequenceClassification(UpperCAmelCase__ ) UpperCamelCase_: Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ) feature_extractor.save_pretrained(UpperCAmelCase__ ) elif is_finetuned: if dict_path: UpperCamelCase_: List[Any] = Dictionary.load(UpperCAmelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase_: Dict = target_dict.pad_index UpperCamelCase_: Tuple = target_dict.bos_index UpperCamelCase_: Optional[Any] = target_dict.eos_index UpperCamelCase_: Union[str, Any] = len(target_dict.symbols ) UpperCamelCase_: int = os.path.join(UpperCAmelCase__ , 'vocab.json' ) if not os.path.isdir(UpperCAmelCase__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCAmelCase__ ) ) return os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) UpperCamelCase_: str = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase_: List[str] = 0 UpperCamelCase_: List[Any] = 1 with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: 
Union[str, Any] = WavaVecaCTCTokenizer( UpperCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCAmelCase__ , ) UpperCamelCase_: Any = True if config.feat_extract_norm == 'layer' else False UpperCamelCase_: Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , ) UpperCamelCase_: Dict = WavaVecaProcessor(feature_extractor=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ ) processor.save_pretrained(UpperCAmelCase__ ) UpperCamelCase_: Any = WavaVecaForCTC(UpperCAmelCase__ ) else: UpperCamelCase_: Any = WavaVecaForPreTraining(UpperCAmelCase__ ) if is_finetuned or is_seq_class: UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase_: List[str] = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase_: Any = fairseq.tasks.setup_task(UpperCAmelCase__ ) UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase__ ) UpperCamelCase_: str = model[0].eval() recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ , not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) A_ : int = parser.parse_args() A_ : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
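For reference, a hedged sketch of driving the converter above directly; the entry-point name and the positional argument order are taken from the script's own __main__ block, while every path is a placeholder:

# All paths are hypothetical; only the function name and argument order come
# from the call in the script's __main__ block above.
convert_wavaveca_checkpoint(
    "/path/to/wav2vec_small.pt",   # --checkpoint_path: fairseq checkpoint
    "./wav2vec2-converted",        # --pytorch_dump_folder_path: output directory
    None,                          # --config_path: optional hf config.json
    "/path/to/dict.ltr.txt",       # --dict_path: only needed for fine-tuned models
    True,                          # is_finetuned (derived from --not_finetuned)
    False,                         # --is_seq_class
)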
57
1
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets _lowerCamelCase : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' _lowerCamelCase : List[str] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' _lowerCamelCase : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class lowercase ( datasets.Metric): def a_ ( self : Tuple ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def a_ ( self : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[str]="auto" , _lowerCamelCase : Tuple=-1 , _lowerCamelCase : Union[str, Any]=0.9 , _lowerCamelCase : Tuple=5 , _lowerCamelCase : Any=5_00 , _lowerCamelCase : Dict="gpt2-large" , _lowerCamelCase : Union[str, Any]=-1 , _lowerCamelCase : Optional[Any]=10_24 , _lowerCamelCase : List[Any]=25 , _lowerCamelCase : str=5 , _lowerCamelCase : Any=True , _lowerCamelCase : Optional[int]=25 , ): """simple docstring""" A_ : Any = compute_mauve( p_text=_lowerCamelCase , q_text=_lowerCamelCase , p_features=_lowerCamelCase , q_features=_lowerCamelCase , p_tokens=_lowerCamelCase , q_tokens=_lowerCamelCase , num_buckets=_lowerCamelCase , pca_max_data=_lowerCamelCase , kmeans_explained_var=_lowerCamelCase , kmeans_num_redo=_lowerCamelCase , kmeans_max_iter=_lowerCamelCase , featurize_model_name=_lowerCamelCase , device_id=_lowerCamelCase , max_text_length=_lowerCamelCase , divergence_curve_discretization_size=_lowerCamelCase , mauve_scaling_factor=_lowerCamelCase , verbose=_lowerCamelCase , seed=_lowerCamelCase , ) return out
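A minimal usage sketch mirroring the docstring example above; it requires the `mauve-text` package and downloads the default gpt2-large featurizer on first use:

import datasets

mauve = datasets.load_metric("mauve")
out = mauve.compute(
    predictions=["hello there", "general kenobi"],
    references=["hello there", "general kenobi"],
)
print(out.mauve)  # 1.0 for identical texts, per the docstring example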
702
"""simple docstring""" from __future__ import annotations from collections import namedtuple def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" A_ : int = namedtuple('''result''' , '''name value''' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('''Only one argument must be 0''' ) elif power < 0: raise ValueError( '''Power cannot be negative in any electrical/electronics system''' ) elif voltage == 0: return result('''voltage''' , power / current ) elif current == 0: return result('''current''' , power / voltage ) elif power == 0: return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
361
0
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __A : @staticmethod def lowerCamelCase__ ( *__snake_case : Tuple , **__snake_case : List[Any] ) -> Tuple: pass def a ( __UpperCAmelCase : Image ) -> str: __magic_name__: Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __A ( unittest.TestCase ): UpperCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Dict ) -> Any: __magic_name__: Optional[int] = DepthEstimationPipeline(model=__snake_case , image_processor=__snake_case ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : List[str] , __snake_case : Any , __snake_case : List[str] ) -> Tuple: __magic_name__: Optional[int] = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __snake_case ) import datasets __magic_name__: Optional[Any] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) __magic_name__: Union[str, Any] = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , __snake_case , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ) -> str: pass @slow @require_torch def lowerCamelCase__ ( self : Dict ) -> List[Any]: __magic_name__: Optional[Any] = """Intel/dpt-large""" __magic_name__: Any = pipeline("""depth-estimation""" , model=__snake_case ) __magic_name__: Optional[int] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) __magic_name__: Optional[int] = hashimage(outputs["""depth"""] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.662 ) @require_torch def lowerCamelCase__ ( self : Any ) -> Optional[Any]: # This is highly irregular to have no small tests. self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
96
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Dict=False ) ->List[Any]: _SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""module.cls_token""", """vit.embeddings.cls_token"""), ("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""module.pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""module.norm.weight""", """layernorm.weight"""), ("""module.norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def lowerCamelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int]=False ) ->Optional[int]: for i in range(config.num_hidden_layers ): if base_model: _SCREAMING_SNAKE_CASE = """""" else: _SCREAMING_SNAKE_CASE = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _SCREAMING_SNAKE_CASE = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' ) _SCREAMING_SNAKE_CASE = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] _SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] 
_SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] _SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def lowerCamelCase ( __lowerCamelCase : Dict ) ->Dict: _SCREAMING_SNAKE_CASE = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : Union[str, Any] ) ->Union[str, Any]: # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. _SCREAMING_SNAKE_CASE = [ """module.fc.fc1.weight""", """module.fc.fc1.bias""", """module.fc.bn1.weight""", """module.fc.bn1.bias""", """module.fc.bn1.running_mean""", """module.fc.bn1.running_var""", """module.fc.bn1.num_batches_tracked""", """module.fc.fc2.weight""", """module.fc.fc2.bias""", """module.fc.bn2.weight""", """module.fc.bn2.bias""", """module.fc.bn2.running_mean""", """module.fc.bn2.running_var""", """module.fc.bn2.num_batches_tracked""", """module.fc.fc3.weight""", """module.fc.fc3.bias""", ] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) ->Optional[Any]: _SCREAMING_SNAKE_CASE = dct.pop(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = val def lowerCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple ) ->List[str]: _SCREAMING_SNAKE_CASE = ViTMSNConfig() _SCREAMING_SNAKE_CASE = 1000 _SCREAMING_SNAKE_CASE = """datasets/huggingface/label-files""" _SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json""" _SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase ) , """r""" ) ) _SCREAMING_SNAKE_CASE = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE = idalabel _SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: _SCREAMING_SNAKE_CASE = 384 _SCREAMING_SNAKE_CASE = 1536 _SCREAMING_SNAKE_CASE = 6 elif "l16" in checkpoint_url: _SCREAMING_SNAKE_CASE = 1024 _SCREAMING_SNAKE_CASE = 4096 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 0.1 elif "b4" in checkpoint_url: _SCREAMING_SNAKE_CASE = 4 elif "l7" in checkpoint_url: _SCREAMING_SNAKE_CASE = 7 _SCREAMING_SNAKE_CASE = 1024 _SCREAMING_SNAKE_CASE = 4096 _SCREAMING_SNAKE_CASE = 24 _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 0.1 _SCREAMING_SNAKE_CASE = ViTMSNModel(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""target_encoder"""] _SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size ) remove_projection_head(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = create_rename_keys(__lowerCamelCase , base_model=__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) read_in_q_k_v(__lowerCamelCase , __lowerCamelCase , base_model=__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() _SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) _SCREAMING_SNAKE_CASE = ViTImageProcessor( size=config.image_size , image_mean=__lowerCamelCase , image_std=__lowerCamelCase ) _SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) _SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE = outputs.last_hidden_state # The 
following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: _SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: _SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __lowerCamelCase , atol=1e-4 ) print(F'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(__lowerCamelCase ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar""", type=str, help="""URL of the checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase_ = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
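A hedged invocation sketch for the conversion above; the checkpoint URL is the script's documented default, the function name appears in its __main__ block, and the output folder is a placeholder:

convert_vit_msn_checkpoint(
    "https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",  # default --checkpoint_url
    "./vit-msn-small",                                          # placeholder dump folder
)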
314
0
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process __a = logging.getLogger(__name__) __a = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) __a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase_ : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class A__ : """simple docstring""" UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase_ : Optional[str] = field(default=UpperCamelCase , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input train ref data 
file for whole word masking in Chinese.'''} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCamelCase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated. Default to the max input length of the model.''' ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCamelCase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) UpperCamelCase_ : bool = field( default=UpperCamelCase , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" if self.train_file is not None: _UpperCAmelCase : int = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: _UpperCAmelCase : List[Any] = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def __UpperCAmelCase ( a_: int, a_: List[Any] ): with open(a_, "r", encoding="utf-8" ) as f: _UpperCAmelCase : Tuple = [json.loads(a_ ) for line in f.read().splitlines() if (len(a_ ) > 0 and not line.isspace())] assert len(a_ ) == len(a_ ) _UpperCAmelCase : str = {c: dataset[c] for c in dataset.column_names} _UpperCAmelCase : Optional[int] = refs return Dataset.from_dict(a_ ) def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. _UpperCAmelCase : Union[str, Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", a_ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _UpperCAmelCase : Optional[int] = load_dataset(data_args.dataset_name, data_args.dataset_config_name ) if "validation" not in datasets.keys(): _UpperCAmelCase : Optional[Any] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"""train[:{data_args.validation_split_percentage}%]""", ) _UpperCAmelCase : Union[str, Any] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"""train[{data_args.validation_split_percentage}%:]""", ) else: _UpperCAmelCase : Union[str, Any] = {} if data_args.train_file is not None: _UpperCAmelCase : Optional[Any] = data_args.train_file if data_args.validation_file is not None: _UpperCAmelCase : Optional[Any] = data_args.validation_file _UpperCAmelCase : Optional[Any] = data_args.train_file.split("." )[-1] if extension == "txt": _UpperCAmelCase : Optional[int] = "text" _UpperCAmelCase : Optional[Any] = load_dataset(a_, data_files=a_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase : Dict = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: _UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name, **a_ ) elif model_args.model_name_or_path: _UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path, **a_ ) else: _UpperCAmelCase : Optional[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(f"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(f"""New config: {config}""" ) _UpperCAmelCase : List[Any] = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: _UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **a_ ) elif model_args.model_name_or_path: _UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **a_ ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: _UpperCAmelCase : List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=a_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch" ) _UpperCAmelCase : Tuple = AutoModelForMaskedLM.from_config(a_ ) model.resize_token_embeddings(len(a_ ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: _UpperCAmelCase : str = datasets["train"].column_names else: _UpperCAmelCase : Tuple = datasets["validation"].column_names _UpperCAmelCase : Optional[Any] = "text" if "text" in column_names else column_names[0] _UpperCAmelCase : Tuple = "max_length" if data_args.pad_to_max_length else False def tokenize_function(a_: Tuple ): # Remove empty lines _UpperCAmelCase : str = [line for line in examples["text"] if len(a_ ) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=a_, truncation=a_, max_length=data_args.max_seq_length ) _UpperCAmelCase : Union[str, Any] = datasets.map( a_, batched=a_, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: _UpperCAmelCase : List[str] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file ) if data_args.validation_ref_file is not None: _UpperCAmelCase : Dict = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer _UpperCAmelCase : Tuple = data_args.train_ref_file or data_args.validation_ref_file if has_ref: _UpperCAmelCase : Optional[Any] = False # Data collator # This one will take care of randomly masking the tokens. 
_UpperCAmelCase : str = DataCollatorForWholeWordMask(tokenizer=a_, mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _UpperCAmelCase : Optional[Any] = Trainer( model=a_, args=a_, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=a_, data_collator=a_, ) # Training if training_args.do_train: if last_checkpoint is not None: _UpperCAmelCase : int = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): _UpperCAmelCase : Union[str, Any] = model_args.model_name_or_path else: _UpperCAmelCase : str = None _UpperCAmelCase : List[str] = trainer.train(resume_from_checkpoint=a_ ) trainer.save_model() # Saves the tokenizer too for easy upload _UpperCAmelCase : Optional[Any] = os.path.join(training_args.output_dir, "train_results.txt" ) if trainer.is_world_process_zero(): with open(a_, "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json" ) ) # Evaluation _UpperCAmelCase : Union[str, Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) _UpperCAmelCase : Union[str, Any] = trainer.evaluate() _UpperCAmelCase : str = math.exp(eval_output["eval_loss"] ) _UpperCAmelCase : str = perplexity _UpperCAmelCase : str = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(a_, "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) return results def __UpperCAmelCase ( a_: Optional[Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
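A hedged sketch of driving the training script above programmatically; the flag names come from the dataclasses it defines (plus standard TrainingArguments flags), while the model id and file names are placeholders:

import sys

sys.argv = [
    "run_mlm_wwm.py",
    "--model_name_or_path", "bert-base-chinese",  # assumption: any MLM checkpoint
    "--train_file", "train.txt",
    "--train_ref_file", "train_ref.txt",          # whole-word-masking reference file
    "--output_dir", "./mlm-wwm-out",
    "--do_train",
]
main()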
257
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
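With the lazy __init__ above, the heavy torch-backed module is imported only when one of its symbols is first accessed; a sketch of the effect from the package's top level:

from transformers import TimesformerConfig  # resolved lazily; config module only

config = TimesformerConfig()  # no modeling (torch) import triggered yet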
257
1
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _UpperCAmelCase : List[Any] = False class lowercase_ ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class lowercase_ ( unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : List[Any] ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Any ) -> List[str]: _A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt='first prompt', image=_snake_case, text_to_image_strength=0.75, generator=_snake_case, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_snake_case ) _A = VersatileDiffusionPipeline.from_pretrained(_snake_case, torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) _A = generator.manual_seed(0 ) _A = pipe.dual_guided( prompt='first prompt', image=_snake_case, text_to_image_strength=0.75, generator=_snake_case, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __UpperCAmelCase ( self : Dict ) -> List[str]: _A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.floataa ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) _A = 'cyberpunk 2077' _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _A = torch.manual_seed(0 ) _A = pipe.dual_guided( prompt=_snake_case, image=_snake_case, text_to_image_strength=0.75, generator=_snake_case, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _A = 'A painting of a squirrel eating a burger ' _A = torch.manual_seed(0 ) _A = pipe.text_to_image( prompt=_snake_case, generator=_snake_case, guidance_scale=7.5, num_inference_steps=50, output_type='numpy' ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 _A = pipe.image_variation(_snake_case, generator=_snake_case, output_type='numpy' ).images _A = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) _A = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
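A sketch of the dual-guided call exercised by the test above; the model id, image URL, prompt, and keyword names come from the test, while the fp16 dtype and CUDA device are assumptions typical of GPU tests:

import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16  # fp16 is an assumption
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
image = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=init_image,
    text_to_image_strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]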
107
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class lowerCamelCase (unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" ) SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" SCREAMING_SNAKE_CASE__ = model(_snake_case )["last_hidden_state"] SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _snake_case ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
159
0
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput A: str = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def __init__( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = eval_examples UpperCAmelCase : List[Any] = post_process_function UpperCAmelCase : List[Any] = quant_trainer_args UpperCAmelCase : Dict = 128 # default number of calibration samples def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> Optional[int]: '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError("""Trainer: calibration requires an calib_dataset.""" ) UpperCAmelCase : List[str] = calib_dataset if calib_dataset is not None else self.calib_dataset UpperCAmelCase : Tuple = self._remove_unused_columns(_SCREAMING_SNAKE_CASE , description="""Calibration""" ) return DataLoader( _SCREAMING_SNAKE_CASE , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_SCREAMING_SNAKE_CASE , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.train_dataset if calib_dataset is None else calib_dataset UpperCAmelCase : Tuple = self.get_calib_dataloader(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = self.model quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args , calib=_SCREAMING_SNAKE_CASE ) model.eval() quant_trainer.enable_calibration(_SCREAMING_SNAKE_CASE ) logger.info("""***** Running calibration *****""" ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(_SCREAMING_SNAKE_CASE ): # Prediction step UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = self.prediction_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(_SCREAMING_SNAKE_CASE , self.quant_trainer_args ) UpperCAmelCase : Optional[int] = model def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "eval" ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : str = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase : int = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
UpperCAmelCase : Dict = self.compute_metrics UpperCAmelCase : int = None UpperCAmelCase : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase : Dict = eval_loop( _SCREAMING_SNAKE_CASE , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , ) finally: UpperCAmelCase : str = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: UpperCAmelCase : Optional[Any] = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions ) UpperCAmelCase : str = self.compute_metrics(_SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): UpperCAmelCase : str = metrics.pop(_SCREAMING_SNAKE_CASE ) self.log(_SCREAMING_SNAKE_CASE ) else: UpperCAmelCase : Union[str, Any] = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , _SCREAMING_SNAKE_CASE ) return metrics def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "test" ) -> List[str]: '''simple docstring''' UpperCAmelCase : int = self.get_test_dataloader(_SCREAMING_SNAKE_CASE ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase : str = self.compute_metrics UpperCAmelCase : int = None UpperCAmelCase : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase : Union[str, Any] = eval_loop( _SCREAMING_SNAKE_CASE , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , ) finally: UpperCAmelCase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase : Optional[int] = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions , """predict""" ) UpperCAmelCase : str = self.compute_metrics(_SCREAMING_SNAKE_CASE ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): UpperCAmelCase : List[str] = metrics.pop(_SCREAMING_SNAKE_CASE ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE="./" ) -> List[str]: '''simple docstring''' UpperCAmelCase : Any = self.eval_dataset UpperCAmelCase : Optional[Any] = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = next(iter(_SCREAMING_SNAKE_CASE ) ) # saving device - to make it consistent UpperCAmelCase : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) # convert to tuple UpperCAmelCase : List[Any] = tuple(v.to(_SCREAMING_SNAKE_CASE ) for k, v in batch.items() ) logger.info("""Converting model to be onnx compatible""" ) from pytorch_quantization.nn import TensorQuantizer UpperCAmelCase : List[Any] = True UpperCAmelCase : Any = self.model.to(_SCREAMING_SNAKE_CASE ) model.eval() model.float() UpperCAmelCase : str = model.module if hasattr(_SCREAMING_SNAKE_CASE , """module""" ) else model 
quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args ) UpperCAmelCase : Tuple = os.path.join(_SCREAMING_SNAKE_CASE , """model.onnx""" ) logger.info(F"exporting model to {output_model_file}" ) UpperCAmelCase : List[Any] = {0: """batch_size""", 1: """seq_len"""} torch.onnx.export( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , export_params=_SCREAMING_SNAKE_CASE , opset_version=13 , do_constant_folding=_SCREAMING_SNAKE_CASE , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={ """input_ids""": axes, """attention_mask""": axes, """token_type_ids""": axes, """output_start_logits""": axes, """output_end_logits""": axes, } , verbose=_SCREAMING_SNAKE_CASE , ) logger.info("""onnx export finished""" )
359
"""simple docstring""" import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : str = tempfile.mkdtemp() UpperCAmelCase : str = 8 # DPR tok UpperCAmelCase : int = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , """dpr_tokenizer""" ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok UpperCAmelCase : int = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) UpperCAmelCase : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : List[Any] = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , """bart_tokenizer""" ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE ( self ) -> DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def SCREAMING_SNAKE_CASE ( self ) -> DPRContextEncoderTokenizer: '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) ) def SCREAMING_SNAKE_CASE ( self ) -> BartTokenizer: '''simple docstring''' return 
BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Dict = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.get_dummy_dataset() UpperCAmelCase : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: UpperCAmelCase : Tuple = dataset UpperCAmelCase : List[str] = RagRetriever( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : List[Any] = self.get_dummy_dataset() UpperCAmelCase : Any = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""custom""" , ) if from_disk: UpperCAmelCase : str = os.path.join(self.tmpdirname , """dataset""" ) UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """index.faiss""" ) dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname , """index.faiss""" ) ) dataset.drop_index("""embeddings""" ) dataset.save_to_disk(os.path.join(self.tmpdirname , """dataset""" ) ) del dataset UpperCAmelCase : List[Any] = RagRetriever( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: UpperCAmelCase : Optional[Any] = RagRetriever( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _SCREAMING_SNAKE_CASE ) , ) return retriever def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("""embeddings""" , string_factory="""Flat""" , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , """hf_bert_base.hnswSQ8_correct_phi_128.c_index""" ) dataset.save_faiss_index("""embeddings""" , index_file_name + """.index.dpr""" ) pickle.dump(dataset["""id"""] , open(index_file_name + """.index_meta.dpr""" , """wb""" ) ) UpperCAmelCase : Dict = os.path.join(self.tmpdirname , """psgs_w100.tsv.pkl""" ) UpperCAmelCase : Any = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset} pickle.dump(_SCREAMING_SNAKE_CASE , open(_SCREAMING_SNAKE_CASE , """wb""" ) ) UpperCAmelCase : Dict = RagConfig( retrieval_vector_size=self.retrieval_vector_size , 
question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="""legacy""" , index_path=self.tmpdirname , ) UpperCAmelCase : str = RagRetriever( _SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : int = 1 UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: UpperCAmelCase : Optional[int] = self.get_dummy_dataset() retriever.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Optional[Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Tuple = 1 UpperCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE ) 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : List[str] = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Optional[Any] = 1 UpperCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : int = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Tuple = 1 UpperCAmelCase : List[Any] = self.get_dummy_legacy_index_retriever() UpperCAmelCase : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=_SCREAMING_SNAKE_CASE ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""text"""] ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : str = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = RagRetriever.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , 
dtype=np.floataa ) UpperCAmelCase : Tuple = retriever.retrieve(_SCREAMING_SNAKE_CASE , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' import torch UpperCAmelCase : Dict = 1 UpperCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever() UpperCAmelCase : List[str] = [[5, 7], [10, 11]] UpperCAmelCase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Union[str, Any] = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = ( out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) UpperCAmelCase : Any = retriever( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = ( # noqa: F841 out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], out["""doc_ids"""], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : int = self.get_dpr_ctx_encoder_tokenizer() UpperCAmelCase : Any = 1 UpperCAmelCase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_SCREAMING_SNAKE_CASE ) retriever.set_ctx_encoder_tokenizer(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = [[5, 7], [10, 11]] UpperCAmelCase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) UpperCAmelCase : Tuple = retriever(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=_SCREAMING_SNAKE_CASE ) self.assertEqual( len(_SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , _SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
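The retriever tests above all follow the same recipe: build a tiny `datasets.Dataset` with an `embeddings` column, attach a FAISS inner-product index, and check that a query of all ones lands on the document whose embedding maximizes the inner product. A minimal standalone sketch of that recipe, with the vector size chosen only for illustration:

```python
import faiss  # requires faiss-cpu or faiss-gpu
import numpy as np
from datasets import Dataset

retrieval_vector_size = 8  # illustrative; matches the dummy tests above

dataset = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(retrieval_vector_size), 2 * np.ones(retrieval_vector_size)],
    }
)
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

# A query of all ones has the largest inner product with the all-twos embedding (doc "1").
query = np.ones(retrieval_vector_size, dtype=np.float32)
scores, examples = dataset.get_nearest_examples("embeddings", query, k=1)
print(examples["id"])  # expected: ['1']
```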
359
1
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph used to simulate a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Walk the outgoing probabilities until the cumulative mass passes a random draw.
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run `steps` transitions from `start` and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())  # every node starts with a count of 1
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
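A quick usage sketch for the module above; the two-node chain and its transition probabilities are made up for illustration:

```python
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
counts = get_transitions("a", transitions, steps=10_000)
# In the stationary distribution pi_a * 0.1 == pi_b * 0.5, so "a" should be
# visited roughly five times as often as "b".
print(counts)
```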
568
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
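For reference, the same encode/forward/decode flow without the tool wrapper looks roughly like this; a sketch assuming a 16 kHz mono waveform (the silent placeholder array stands in for real audio):

```python
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of silence
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
generated_ids = model.generate(inputs=features)
text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(text)
```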
568
1
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase : def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=3 , lowerCAmelCase__=32 , lowerCAmelCase__=3 , lowerCAmelCase__=10 , lowerCAmelCase__=[10, 20, 30, 40] , lowerCAmelCase__=[1, 1, 2, 1] , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=3 , lowerCAmelCase__=None , ): _A= parent _A= batch_size _A= image_size _A= num_channels _A= embeddings_size _A= hidden_sizes _A= depths _A= is_training _A= use_labels _A= hidden_act _A= num_labels _A= scope _A= len(_A ) def a__ ( self ): _A= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A= None if self.use_labels: _A= ids_tensor([self.batch_size] , self.num_labels ) _A= self.get_config() return config, pixel_values, labels def a__ ( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _A= TFRegNetModel(config=_A ) _A= model(_A , training=_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _A= self.num_labels _A= TFRegNetForImageClassification(_A ) _A= model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self ): _A= self.prepare_config_and_inputs() _A, _A, _A= config_and_inputs _A= {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase ( _a , _a , unittest.TestCase ): _SCREAMING_SNAKE_CASE : List[Any] =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _SCREAMING_SNAKE_CASE : Optional[Any] =( {"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification} if is_tf_available() else {} ) _SCREAMING_SNAKE_CASE : Union[str, Any] =False _SCREAMING_SNAKE_CASE : Union[str, Any] =False _SCREAMING_SNAKE_CASE : List[str] =False _SCREAMING_SNAKE_CASE : List[str] =False _SCREAMING_SNAKE_CASE : List[Any] =False def a__ ( self ): _A= TFRegNetModelTester(self ) _A= ConfigTester(self , config_class=_A , has_text_modality=_A ) def a__ ( self ): return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def a__ ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def a__ ( self ): super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def a__ ( self ): pass def a__ ( self ): _A, _A= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A= model_class(_A ) _A= inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A= [*signature.parameters.keys()] _A= ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def a__ ( self ): _A= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def a__ ( self ): def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _A= model_class(_A ) _A= model(**self._prepare_for_class(_A , _A ) , training=_A ) _A= outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _A= self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _A, _A= self.model_tester.prepare_config_and_inputs_for_common() _A= ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _A= layer_type _A= True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A= True check_hidden_states_output(_A , _A , _A ) def a__ ( self ): _A, _A= self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__={} ): _A= model(_A , return_dict=_A , **_A ) _A= model(_A , return_dict=_A , **_A ).to_tuple() def recursive_check(lowerCAmelCase__ , lowerCAmelCase__ ): if isinstance(_A , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_A , _A ): recursive_check(_A , _A ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_A , _A ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}" ) , ) recursive_check(_A , _A ) for model_class in self.all_model_classes: _A= model_class(_A ) _A= self._prepare_for_class(_A , _A ) _A= self._prepare_for_class(_A , _A ) check_equivalence(_A , _A , _A ) _A= self._prepare_for_class(_A , _A , return_labels=_A ) _A= self._prepare_for_class(_A , _A , return_labels=_A ) check_equivalence(_A , _A , _A ) _A= self._prepare_for_class(_A , _A ) _A= self._prepare_for_class(_A , _A ) check_equivalence(_A , _A , _A , {'output_hidden_states': True} ) _A= self._prepare_for_class(_A , _A , return_labels=_A ) _A= self._prepare_for_class(_A , _A , return_labels=_A ) check_equivalence(_A , _A , _A , {'output_hidden_states': True} ) def a__ ( self ): _A= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def a__ ( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A= TFRegNetModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def UpperCamelCase ( ) -> str: '''simple docstring''' _A= Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase ( unittest.TestCase ): @cached_property def a__ ( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a__ ( self ): _A= TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _A= self.default_image_processor _A= prepare_img() _A= image_processor(images=_A , return_tensors='tf' ) # forward pass _A= model(**_A , training=_A ) # verify the logits _A= tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) _A= tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
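The `check_equivalence` helper above encodes a generally useful testing pattern: call the model twice, once with `return_dict=True` and once with `return_dict=False`, then recursively compare the nested outputs. A stripped-down sketch of that comparison, independent of any particular model (the usage lines are placeholders):

```python
import tensorflow as tf


def assert_outputs_equal(tuple_obj, dict_obj):
    """Recursively check that tuple-style and dict-style outputs carry the same tensors."""
    if isinstance(tuple_obj, (list, tuple)):
        for t, d in zip(tuple_obj, dict_obj):
            assert_outputs_equal(t, d)
    elif tuple_obj is None:
        return
    else:
        max_diff = tf.math.reduce_max(tf.abs(tuple_obj - dict_obj))
        assert bool(tf.reduce_all(tf.equal(tuple_obj, dict_obj))), f"outputs differ by {max_diff}"


# usage sketch (model and inputs are placeholders):
# dict_out = model(inputs, return_dict=True)
# tuple_out = model(inputs, return_dict=False)
# assert_outputs_equal(tuple_out, dict_out.to_tuple())
```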
714
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase ( _a , _a , _a , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] =AltDiffusionPipeline _SCREAMING_SNAKE_CASE : int =TEXT_TO_IMAGE_PARAMS _SCREAMING_SNAKE_CASE : List[Any] =TEXT_TO_IMAGE_BATCH_PARAMS _SCREAMING_SNAKE_CASE : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS _SCREAMING_SNAKE_CASE : Any =TEXT_TO_IMAGE_IMAGE_PARAMS def a__ ( self ): torch.manual_seed(0 ) _A= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _A= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0 ) _A= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) _A= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) _A= CLIPTextModel(lowerCAmelCase__ ) _A= XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) _A= 77 _A= { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ): if str(lowerCAmelCase__ ).startswith('mps' ): _A= torch.manual_seed(lowerCAmelCase__ ) else: _A= torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) _A= { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a__ ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def a__ ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def a__ ( self ): _A= 'cpu' # ensure determinism for the device-dependent torch.Generator _A= self.get_dummy_components() torch.manual_seed(0 ) _A= RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , 
layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _A= RobertaSeriesModelWithTransformation(lowerCAmelCase__ ) _A= text_encoder _A= AltDiffusionPipeline(**lowerCAmelCase__ ) _A= alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _A= self.get_dummy_inputs(lowerCAmelCase__ ) _A= 'A photo of an astronaut' _A= alt_pipe(**lowerCAmelCase__ ) _A= output.images _A= image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A= np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self ): _A= 'cpu' # ensure determinism for the device-dependent torch.Generator _A= self.get_dummy_components() _A= PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) _A= RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _A= RobertaSeriesModelWithTransformation(lowerCAmelCase__ ) _A= text_encoder _A= AltDiffusionPipeline(**lowerCAmelCase__ ) _A= alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _A= self.get_dummy_inputs(lowerCAmelCase__ ) _A= alt_pipe(**lowerCAmelCase__ ) _A= output.images _A= image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _A= np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase ( unittest.TestCase ): def a__ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self ): # make sure here that pndm scheduler skips prk _A= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCAmelCase__ ) _A= alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _A= 'A painting of a squirrel eating a burger' _A= torch.manual_seed(0 ) _A= alt_pipe([prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' ) _A= output.images _A= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _A= np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self ): _A= DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' ) _A= AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ ) _A= alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) _A= 'A painting of a squirrel eating a burger' _A= torch.manual_seed(0 ) _A= alt_pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type='numpy' ) _A= output.images _A= image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _A= np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
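The tests above rely on the standard diffusers reproducibility pattern: seed a `torch.Generator` on the target device and pass it into the pipeline call so the same seed yields the same latents and therefore a comparable image slice. A minimal sketch of that pattern; the pipeline call and prompt in the usage lines are placeholders:

```python
import torch


def get_seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support device-bound generators the same way, hence the
    # branch, mirroring the get_dummy_inputs helper above.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


# usage sketch:
# generator = get_seeded_generator("cpu", seed=0)
# image = pipe("A painting of a squirrel eating a burger", generator=generator,
#              num_inference_steps=2, output_type="np").images[0]
```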
476
0
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = ['''input_features''', '''is_longer'''] def __init__( self , lowerCamelCase_=6_4 , lowerCamelCase_=4_8_0_0_0 , lowerCamelCase_=4_8_0 , lowerCamelCase_=1_0 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=0.0 , lowerCamelCase_=False , lowerCamelCase_ = 0 , lowerCamelCase_ = 1_4_0_0_0 , lowerCamelCase_ = None , lowerCamelCase_ = "fusion" , lowerCamelCase_ = "repeatpad" , **lowerCamelCase_ , ) -> Optional[Any]: super().__init__( feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , ) UpperCamelCase = top_db UpperCamelCase = truncation UpperCamelCase = padding UpperCamelCase = fft_window_size UpperCamelCase = (fft_window_size >> 1) + 1 UpperCamelCase = hop_length UpperCamelCase = max_length_s UpperCamelCase = max_length_s * sampling_rate UpperCamelCase = sampling_rate UpperCamelCase = frequency_min UpperCamelCase = frequency_max UpperCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm=lowerCamelCase_ , mel_scale='''htk''' , ) UpperCamelCase = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase_ , min_frequency=lowerCamelCase_ , max_frequency=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ) def UpperCAmelCase__ ( self) -> Dict[str, Any]: UpperCamelCase = copy.deepcopy(self.__dict__) UpperCamelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> np.ndarray: UpperCamelCase = spectrogram( lowerCamelCase_ , window_function(self.fft_window_size , '''hann''') , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase_ , log_mel='''dB''' , ) return log_mel_spectrogram.T def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Union[str, Any]: UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk UpperCamelCase = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk UpperCamelCase = [0] # randomly choose index for each part UpperCamelCase = np.random.choice(ranges[0]) UpperCamelCase = np.random.choice(ranges[1]) UpperCamelCase = np.random.choice(ranges[2]) UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :] UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :] UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :] UpperCamelCase = torch.tensor(mel[None, None, :]) UpperCamelCase = torch.nn.functional.interpolate( lowerCamelCase_ , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=lowerCamelCase_) UpperCamelCase = mel_shrink[0][0].numpy() UpperCamelCase = 
np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": UpperCamelCase = True # random crop to max_length (for compatibility) -> this should be handled by self.pad UpperCamelCase = len(lowerCamelCase_) - max_length UpperCamelCase = np.random.randint(0 , overflow + 1) UpperCamelCase = waveform[idx : idx + max_length] UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters) UpperCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCamelCase = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCamelCase = np.stack([mel, mel, mel, mel] , axis=0) UpperCamelCase = False else: UpperCamelCase = self._random_mel_fusion(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = True else: raise NotImplementedError(F'data_truncating {truncation} not implemented') else: UpperCamelCase = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCamelCase = int(max_length / len(lowerCamelCase_)) UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": UpperCamelCase = int(max_length / len(lowerCamelCase_)) UpperCamelCase = np.stack(np.tile(lowerCamelCase_ , lowerCamelCase_)) UpperCamelCase = np.pad(lowerCamelCase_ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0) if truncation == "fusion": UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters) UpperCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: UpperCamelCase = self._np_extract_fbank_features(lowerCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> BatchFeature: UpperCamelCase = truncation if truncation is not None else self.truncation UpperCamelCase = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' F' was sampled with {self.sampling_rate} and not {sampling_rate}.') else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''') UpperCamelCase = isinstance(lowerCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F'Only mono-channel audio is supported for input to {self}') UpperCamelCase = is_batched_numpy or ( isinstance(lowerCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: UpperCamelCase = [np.asarray(lowerCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray): UpperCamelCase = np.asarray(lowerCamelCase_ , dtype=np.floataa) elif isinstance(lowerCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): UpperCamelCase = raw_speech.astype(np.floataa) # always return batch if not is_batched: UpperCamelCase = [np.asarray(lowerCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. UpperCamelCase = [ self._get_input_mel(lowerCamelCase_ , max_length if max_length else self.nb_max_samples , lowerCamelCase_ , lowerCamelCase_) for waveform in raw_speech ] UpperCamelCase = [] UpperCamelCase = [] for mel, longer in padded_inputs: input_mel.append(lowerCamelCase_) is_longer.append(lowerCamelCase_) if truncation == "fusion" and sum(lowerCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCamelCase = np.random.randint(0 , len(lowerCamelCase_)) UpperCamelCase = True if isinstance(input_mel[0] , lowerCamelCase_): UpperCamelCase = [np.asarray(lowerCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool UpperCamelCase = [[longer] for longer in is_longer] UpperCamelCase = {'''input_features''': input_mel, '''is_longer''': is_longer} UpperCamelCase = BatchFeature(lowerCamelCase_) if return_tensors is not None: UpperCamelCase = input_features.convert_to_tensors(lowerCamelCase_) return input_features
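A hedged usage sketch for a feature extractor of this kind (in transformers this file corresponds to `ClapFeatureExtractor`; the default 48 kHz / 10 s configuration and the synthetic waveform are assumptions for illustration):

```python
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()  # assumed defaults: 48 kHz, 10 s max length, "fusion" truncation

# Twelve seconds of a 440 Hz tone: longer than max_length, so the "fusion" path is exercised.
sampling_rate = 48_000
t = np.arange(12 * sampling_rate) / sampling_rate
waveform = np.sin(2 * np.pi * 440 * t).astype(np.float32)

inputs = feature_extractor(waveform, sampling_rate=sampling_rate, return_tensors="pt")
print(inputs["input_features"].shape)  # (batch, 4, frames, 64) on the fusion path
print(inputs["is_longer"])             # True: the clip exceeded the maximum length
```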
34
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking (bell) biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
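A short usage sketch, assuming the companion `IIRFilter` class from the same repository is importable and processes one sample at a time via a `process()` method:

```python
from math import sin, tau

# Assumes the filter module above and audio_filters.iir_filter are on the path.
filt = make_lowpass(1_000, 48_000)  # 1 kHz cutoff at a 48 kHz sample rate

# A 10 kHz tone sits far above the cutoff and should come out strongly attenuated.
samples = [sin(tau * 10_000 * n / 48_000) for n in range(480)]
filtered = [filt.process(sample) for sample in samples]
print(max(abs(s) for s in filtered[240:]))  # well below 1.0 once the filter settles
```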
120
0
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): lowerCamelCase : Optional[int] = True from torch.cuda.amp import autocast lowerCamelCase : Optional[int] = logging.getLogger(__name__) def snake_case_ ( lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]=None ): return field(default_factory=lambda: default , metadata=lowerCAmelCase_ ) @dataclass class lowerCAmelCase : '''simple docstring''' _A : str = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) _A : Optional[str] = field( default=__a , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) _A : Optional[bool] = field( default=__a , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) _A : Optional[float] = field( default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} ) _A : Optional[float] = field( default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} ) _A : Optional[float] = field( default=0.1 , metadata={ '''help''': '''The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.''' } , ) _A : Optional[float] = field( default=0.1 , metadata={'''help''': '''The dropout probabilitiy for all 1D convolutional layers in feature extractor.'''} , ) _A : Optional[float] = field( default=0.0_5 , metadata={ '''help''': ( '''Propability of each feature vector along the time axis to be chosen as the start of the vector''' '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature''' '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.''' ) } , ) _A : Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} ) @dataclass class lowerCAmelCase : '''simple docstring''' _A : Optional[str] = field( default=__a , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _A : Optional[str] = field( default='''train+validation''' , metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). 
Defaults to \'train\'''' } , ) _A : bool = field( default=__a , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) _A : Optional[int] = field( default=__a , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) _A : Optional[int] = field( default=__a , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _A : Optional[int] = field( default=__a , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of validation examples to this ''' '''value if set.''' ) } , ) _A : List[str] = list_field( default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , ) @dataclass class lowerCAmelCase : '''simple docstring''' _A : WavaVecaProcessor _A : Union[bool, str] = True _A : Optional[int] = None _A : Optional[int] = None _A : Optional[int] = None _A : Optional[int] = None def __call__( self : Optional[Any] , __a : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: """simple docstring""" __lowercase : Dict = [{"""input_values""": feature["""input_values"""]} for feature in features] __lowercase : int = [{"""input_ids""": feature["""labels"""]} for feature in features] __lowercase : Dict = self.processor.pad( __a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , ) __lowercase : Dict = self.processor.pad( labels=__a , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , ) # replace padding with -100 to ignore loss correctly __lowercase : Optional[int] = labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 ) __lowercase : Optional[Any] = labels return batch class lowerCAmelCase ( __a ): '''simple docstring''' def lowerCAmelCase ( self : Optional[int] , __a : nn.Module , __a : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: """simple docstring""" model.train() __lowercase : str = self._prepare_inputs(__a ) if self.use_amp: with autocast(): __lowercase : Any = self.compute_loss(__a , __a ) else: __lowercase : int = self.compute_loss(__a , __a ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": __lowercase : str = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __lowercase : Optional[int] = loss.sum() / (inputs["""labels"""] >= 0).sum() else: raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" ) if self.args.gradient_accumulation_steps > 1: __lowercase : List[str] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__a ).backward() elif self.use_apex: with amp.scale_loss(__a , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__a ) else: loss.backward() return loss.detach() def snake_case_ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
__lowercase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowercase , __lowercase , __lowercase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowercase , __lowercase , __lowercase : List[str] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. __lowercase : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowercase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: __lowercase : Union[str, Any] = datasets.load_dataset( """common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name ) __lowercase : List[str] = datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" ) # Create and save tokenizer __lowercase : str = F"[{''.join(data_args.chars_to_ignore )}]" def remove_special_characters(lowerCAmelCase_ : Any ): __lowercase : Any = re.sub(lowerCAmelCase_ , """""" , batch["""sentence"""] ).lower() + """ """ return batch __lowercase : int = train_dataset.map(lowerCAmelCase_ , remove_columns=["""sentence"""] ) __lowercase : Tuple = eval_dataset.map(lowerCAmelCase_ , remove_columns=["""sentence"""] ) def extract_all_chars(lowerCAmelCase_ : Optional[Any] ): __lowercase : Union[str, Any] = """ """.join(batch["""text"""] ) __lowercase : str = list(set(lowerCAmelCase_ ) ) return {"vocab": [vocab], "all_text": [all_text]} __lowercase : Optional[int] = train_dataset.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , batch_size=-1 , keep_in_memory=lowerCAmelCase_ , remove_columns=train_dataset.column_names , ) __lowercase : Optional[Any] = train_dataset.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , batch_size=-1 , keep_in_memory=lowerCAmelCase_ , remove_columns=eval_dataset.column_names , ) __lowercase : List[Any] = list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) ) __lowercase : Optional[int] = {v: k for k, v in enumerate(lowerCAmelCase_ )} __lowercase : Any = vocab_dict[""" """] del vocab_dict[" "] __lowercase : List[Any] = len(lowerCAmelCase_ ) __lowercase : Optional[Any] = len(lowerCAmelCase_ ) with open("""vocab.json""" , """w""" ) as vocab_file: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowercase : Union[str, Any] = WavaVecaCTCTokenizer( """vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , ) __lowercase : str = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ ) __lowercase : str = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) __lowercase : List[Any] = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: __lowercase : Union[str, Any] = min(len(lowerCAmelCase_ ) , data_args.max_train_samples ) __lowercase : Optional[int] = train_dataset.select(range(lowerCAmelCase_ ) ) if data_args.max_val_samples is not None: __lowercase : int = eval_dataset.select(range(data_args.max_val_samples ) ) __lowercase : List[str] = torchaudio.transforms.Resample(48000 , 16000 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. 
def speech_file_to_array_fn(lowerCAmelCase_ : List[Any] ): __lowercase , __lowercase : Optional[int] = torchaudio.load(batch["""path"""] ) __lowercase : Tuple = resampler(lowerCAmelCase_ ).squeeze().numpy() __lowercase : Any = 16000 __lowercase : Union[str, Any] = batch["""text"""] return batch __lowercase : List[Any] = train_dataset.map( lowerCAmelCase_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) __lowercase : Dict = eval_dataset.map( lowerCAmelCase_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(lowerCAmelCase_ : str ): # check that all files have the correct sampling rate assert ( len(set(batch["""sampling_rate"""] ) ) == 1 ), F"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." __lowercase : str = processor( audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] ) batch.update(lowerCAmelCase_ ) return batch __lowercase : Any = train_dataset.map( lowerCAmelCase_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , ) __lowercase : Optional[int] = eval_dataset.map( lowerCAmelCase_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , ) # Metric __lowercase : Optional[Any] = datasets.load_metric("""wer""" ) def compute_metrics(lowerCAmelCase_ : str ): __lowercase : str = pred.predictions __lowercase : int = np.argmax(lowerCAmelCase_ , axis=-1 ) __lowercase : Optional[Any] = processor.tokenizer.pad_token_id __lowercase : Optional[int] = processor.batch_decode(lowerCAmelCase_ ) # we do not want to group tokens when computing the metrics __lowercase : Any = processor.batch_decode(pred.label_ids , group_tokens=lowerCAmelCase_ ) __lowercase : Union[str, Any] = wer_metric.compute(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator __lowercase : Optional[int] = DataCollatorCTCWithPadding(processor=lowerCAmelCase_ , padding=lowerCAmelCase_ ) # Initialize our Trainer __lowercase : Optional[Any] = CTCTrainer( model=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , args=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: __lowercase : Optional[int] = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): __lowercase : int = model_args.model_name_or_path else: __lowercase : Union[str, Any] = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) __lowercase : Union[str, Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_ ) trainer.save_model() __lowercase : List[Any] = train_result.metrics __lowercase : List[str] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase_ ) ) __lowercase : Union[str, Any] = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.log_metrics("""train""" , lowerCAmelCase_ ) trainer.save_metrics("""train""" , 
lowerCAmelCase_ ) trainer.save_state() # Evaluation __lowercase : int = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __lowercase : int = trainer.evaluate() __lowercase : Any = data_args.max_val_samples if data_args.max_val_samples is not None else len(lowerCAmelCase_ ) __lowercase : Tuple = min(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) trainer.log_metrics("""eval""" , lowerCAmelCase_ ) trainer.save_metrics("""eval""" , lowerCAmelCase_ ) return results if __name__ == "__main__": main()
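One detail in the data collator above deserves a standalone illustration: CTC loss must ignore padded label positions, which the collator achieves by masking them to -100. A minimal sketch of that trick, with made-up token ids:

```python
import torch

labels = torch.tensor([[12, 7, 31, 0, 0], [5, 9, 0, 0, 0]])    # 0 is the pad token here
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 0, 0, 0]])

# Positions where the attention mask is 0 are padding; -100 tells the loss to skip them.
masked_labels = labels.masked_fill(attention_mask.ne(1), -100)
print(masked_labels)
# tensor([[  12,    7,   31, -100, -100],
#         [   5,    9, -100, -100, -100]])
```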
649
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
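The file above is an instance of the lazy-import pattern: the package `__init__.py` declares an `_import_structure` map and swaps itself for a `_LazyModule`, so heavy submodules load only on first attribute access. A generic, framework-free sketch of the same idea (module and symbol names are invented):

```python
import importlib
import sys
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name: str):
        if name in self._symbol_to_module:
            module = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[name]}")
            value = getattr(module, name)
            setattr(self, name, value)  # cache so the submodule import happens once
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")


# usage sketch, as at the bottom of the file above:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)
```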
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_snake_case : Tuple , **_snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" a_ : Tuple =MODEL_FOR_OBJECT_DETECTION_MAPPING def _lowerCAmelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ) -> List[str]: '''simple docstring''' a__ = ObjectDetectionPipeline(model=_snake_case , image_processor=_snake_case ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _lowerCAmelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : List[str] ) -> Dict: '''simple docstring''' a__ = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 ) self.assertGreater(len(_snake_case ) , 0 ) for detected_object in outputs: self.assertEqual( _snake_case , { 'score': ANY(_snake_case ), 'label': ANY(_snake_case ), 'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )}, } , ) import datasets a__ = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) a__ = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] a__ = object_detector(_snake_case , threshold=0.0 ) self.assertEqual(len(_snake_case ) , len(_snake_case ) ) for outputs in batch_outputs: self.assertGreater(len(_snake_case ) , 0 ) for detected_object in outputs: self.assertEqual( _snake_case , { 'score': ANY(_snake_case ), 'label': ANY(_snake_case ), 'box': {'xmin': ANY(_snake_case ), 'ymin': ANY(_snake_case ), 'xmax': ANY(_snake_case ), 'ymax': ANY(_snake_case )}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF' ) def _lowerCAmelCase ( self : Tuple ) -> Tuple: '''simple docstring''' pass @require_torch def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]: '''simple docstring''' a__ = 'hf-internal-testing/tiny-detr-mobilenetsv3' a__ = AutoModelForObjectDetection.from_pretrained(_snake_case ) a__ = AutoFeatureExtractor.from_pretrained(_snake_case ) a__ = ObjectDetectionPipeline(model=_snake_case , feature_extractor=_snake_case ) a__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ] , ) a__ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ {'score': 0.3376, 'label': 'LABEL_0', 
'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], [ {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : Tuple ) -> Any: '''simple docstring''' a__ = 'facebook/detr-resnet-50' a__ = AutoModelForObjectDetection.from_pretrained(_snake_case ) a__ = AutoFeatureExtractor.from_pretrained(_snake_case ) a__ = ObjectDetectionPipeline(model=_snake_case , feature_extractor=_snake_case ) a__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) a__ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' a__ = 'facebook/detr-resnet-50' a__ = pipeline('object-detection' , model=_snake_case ) a__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) a__ = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ [ 
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : int ) -> Union[str, Any]: '''simple docstring''' a__ = 0.9985 a__ = 'facebook/detr-resnet-50' a__ = pipeline('object-detection' , model=_snake_case ) a__ = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_snake_case ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) @require_torch @require_pytesseract @slow def _lowerCAmelCase ( self : Tuple ) -> List[str]: '''simple docstring''' a__ = 'Narsil/layoutlmv3-finetuned-funsd' a__ = 0.9993 a__ = pipeline('object-detection' , model=_snake_case , threshold=_snake_case ) a__ = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(_snake_case , decimals=4 ) , [ {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, ] , )
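# ---------------------------------------------------------------------------
# For reference, a minimal usage sketch of the pipeline exercised by these
# tests. Requires torch and timm; downloads facebook/detr-resnet-50 on first
# run. The image URL is the same COCO fixture the tests use.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
for obj in results:
    box = obj["box"]
    print(f"{obj['label']}: {obj['score']:.3f} at ({box['xmin']}, {box['ymin']})")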
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase__ ) -> bool: '''simple docstring''' a__ = 0 for ch in input_str: a__ = ord(UpperCAmelCase__ ) a__ = pow(2,UpperCAmelCase__ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _lowercase ( __lowerCamelCase ): _lowercase : Tuple = 'char' _lowercase : Optional[int] = 'bpe' _lowercase : Tuple = 'wp' __lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _lowercase ( __lowerCamelCase ): _lowercase : str = ['image_processor', 'char_tokenizer'] _lowercase : Optional[Any] = 'ViTImageProcessor' _lowercase : str = 'MgpstrTokenizer' def __init__( self : List[Any] , lowerCamelCase__ : str=None , lowerCamelCase__ : List[Any]=None , **lowerCamelCase__ : int ) -> int: """simple docstring""" A_ = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCamelCase__ , ) A_ = kwargs.pop('''feature_extractor''' ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) A_ = tokenizer A_ = AutoTokenizer.from_pretrained('''gpt2''' ) A_ = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self : Optional[int] , lowerCamelCase__ : str=None , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Tuple ) -> Optional[int]: """simple docstring""" if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: A_ = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is not None: A_ = self.char_tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) if text is None: return inputs elif images is None: return encodings else: A_ = encodings['''input_ids'''] return inputs def UpperCamelCase ( self : Tuple , lowerCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" A_ ,A_ ,A_ = sequences A_ = char_preds.size(0 ) A_ ,A_ = self._decode_helper(lowerCamelCase__ , '''char''' ) A_ ,A_ = self._decode_helper(lowerCamelCase__ , '''bpe''' ) A_ ,A_ = self._decode_helper(lowerCamelCase__ , '''wp''' ) A_ = [] A_ = [] for i in range(lowerCamelCase__ ): A_ = [char_scores[i], bpe_scores[i], wp_scores[i]] A_ = [char_strs[i], bpe_strs[i], wp_strs[i]] A_ = scores.index(max(lowerCamelCase__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) A_ = {} A_ = final_strs A_ = final_scores A_ = char_strs A_ = bpe_strs A_ = wp_strs return out def UpperCamelCase ( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Any ) -> Any: """simple docstring""" if format == DecodeType.CHARACTER: A_ = self.char_decode A_ = 1 A_ = '''[s]''' elif format == DecodeType.BPE: A_ = self.bpe_decode A_ = 2 A_ = '''#''' elif format == DecodeType.WORDPIECE: A_ = self.wp_decode A_ = 1_0_2 A_ = '''[SEP]''' else: raise ValueError(F"Format {format} is not supported." 
) A_ ,A_ = [], [] A_ = pred_logits.size(0 ) A_ = pred_logits.size(1 ) A_ ,A_ = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase__ , sorted=lowerCamelCase__ ) A_ = preds_index.view(-1 , lowerCamelCase__ )[:, 1:] A_ = decoder(lowerCamelCase__ ) A_ ,A_ = torch.nn.functional.softmax(lowerCamelCase__ , dim=2 ).max(dim=2 ) A_ = preds_max_prob[:, 1:] for index in range(lowerCamelCase__ ): A_ = preds_str[index].find(lowerCamelCase__ ) A_ = preds_str[index][:pred_eos] A_ = preds_index[index].cpu().tolist() A_ = pred_index.index(lowerCamelCase__ ) if eos_token in pred_index else -1 A_ = preds_max_prob[index][: pred_eos_index + 1] A_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCamelCase__ ) conf_scores.append(lowerCamelCase__ ) return dec_strs, conf_scores def UpperCamelCase ( self : str , lowerCamelCase__ : Tuple ) -> Any: """simple docstring""" A_ = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase__ )] return decode_strs def UpperCamelCase ( self : str , lowerCamelCase__ : Dict ) -> Dict: """simple docstring""" return self.bpe_tokenizer.batch_decode(lowerCamelCase__ ) def UpperCamelCase ( self : Any , lowerCamelCase__ : Optional[Any] ) -> Tuple: """simple docstring""" A_ = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase__ )] return decode_strs
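# ---------------------------------------------------------------------------
# Toy illustration of the per-sample head selection performed above: keep the
# decoded string whose head (char / BPE / WordPiece) reports the highest
# cumulative confidence. The strings and scores are invented.
char_strs, bpe_strs, wp_strs = ["ticket"], ["tlcket"], ["ticket"]
char_scores, bpe_scores, wp_scores = [0.92], [0.61], [0.88]

final_strs, final_scores = [], []
for i in range(len(char_strs)):
    scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
    strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
    best = scores.index(max(scores))  # index of the most confident head
    final_strs.append(strs[best])
    final_scores.append(scores[best])

print(final_strs, final_scores)  # ['ticket'] [0.92]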
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowercase = logging.get_logger(__name__) class _lowercase ( __lowerCamelCase ): _lowercase : Optional[Any] = ['pixel_values'] def __init__( self : List[str] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Dict[str, int]] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_5_5 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : str , ) -> None: """simple docstring""" super().__init__(**lowerCamelCase__ ) A_ = size if size is not None else {'''shortest_edge''': 2_5_6} A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) A_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} A_ = get_size_dict(lowerCamelCase__ ) A_ = do_resize A_ = size A_ = resample A_ = do_center_crop A_ = crop_size A_ = do_rescale A_ = rescale_factor A_ = do_normalize A_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[int] , ) -> np.ndarray: """simple docstring""" A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}" ) A_ = get_resize_output_image_size(lowerCamelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase__ ) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Union[str, Any] , ) -> np.ndarray: """simple docstring""" A_ = get_size_dict(lowerCamelCase__ ) return center_crop(lowerCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase ( self : Any , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : float , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Union[str, Any] ) -> np.ndarray: """simple docstring""" return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[float] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase__ : Dict , ) -> Dict: """simple docstring""" A_ = do_resize if do_resize is not None else self.do_resize A_ = size if size is not None else self.size A_ = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) A_ = resample if resample is not None else self.resample A_ = do_center_crop if do_center_crop is not None else self.do_center_crop A_ = crop_size if crop_size is not None else self.crop_size A_ = get_size_dict(lowerCamelCase__ ) A_ = do_rescale if do_rescale is not None else self.do_rescale A_ = rescale_factor if rescale_factor is not None else self.rescale_factor A_ = do_normalize if do_normalize is not None else self.do_normalize A_ = image_mean if image_mean is not None else self.image_mean A_ = image_std if image_std is not None else self.image_std A_ = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A_ = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: A_ = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images] if do_center_crop: A_ = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images] if do_rescale: A_ = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images] if do_normalize: A_ = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images] A_ = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] A_ = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
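# ---------------------------------------------------------------------------
# Usage sketch of the resize/center-crop/rescale/normalize flow defined
# above, demonstrated with a real checkpoint (ViT) as a stand-in for the
# processor class in this file; the dummy image is random data.
import numpy as np
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # fake RGB image

inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224), channels-first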
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Validate a Sri Lankan mobile phone number against the common formats."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # country code / trunk prefix
        r"7(0|1|2|4|5|6|7|8)"  # mobile operator block
        r"(-| |)"  # optional separator
        r"\d{7}$"  # subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
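# Additional usage checks for the validator above; outputs as comments.
print(is_sri_lankan_phone_number("+94773283048"))   # True  ('+94' prefix)
print(is_sri_lankan_phone_number("0718382399"))     # True  (trunk '0' prefix)
print(is_sri_lankan_phone_number("075 3283048"))    # True  (space separator)
print(is_sri_lankan_phone_number("0093702343221"))  # False (wrong prefix)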
"""Project Euler problem 101: sum the first incorrect terms (FITs) of the
optimum polynomials fitted to a degree-10 generating function."""
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector via Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: bring the row with the largest entry in this column up
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through (1, y_1), ..., (n, y_n)."""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The degree-10 generating polynomial from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the value of each fitted polynomial at its first point of disagreement."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
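# Quick sanity check against the worked example in the Project Euler 101
# statement: for u(n) = n**3 the first incorrect terms sum to 1 + 15 + 58.
print(solution(lambda n: n**3, order=3))  # 74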
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class snake_case_ ( __A , __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[int] = False def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict=False ) ->List[str]: snake_case_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) if return_labels: if model_class in get_values(_UpperCamelCase ): snake_case_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class snake_case_ ( __A ): '''simple docstring''' def __init__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=1_3 , _UpperCamelCase : Tuple=7 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=True , _UpperCamelCase : Any=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : str=9_9 , _UpperCamelCase : Tuple=3_2 , _UpperCamelCase : int=3_2 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Optional[int]=3_7 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : int=5_1_2 , _UpperCamelCase : Any=1_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : Any=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : str=None , ) ->List[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope snake_case_ = embedding_size def snake_case__( 
self : Optional[int] ) ->Optional[int]: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__( self : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) ->Optional[Any]: snake_case_ = TFMobileBertModel(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) snake_case_ = [input_ids, input_mask] snake_case_ = model(_UpperCamelCase ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def snake_case__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = TFMobileBertForMaskedLM(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->Optional[Any]: snake_case_ = TFMobileBertForNextSentencePrediction(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) ->List[Any]: snake_case_ = TFMobileBertForPreTraining(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, 
'''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def snake_case__( self : int , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->Any: snake_case_ = self.num_labels snake_case_ = TFMobileBertForSequenceClassification(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : str ) ->Union[str, Any]: snake_case_ = self.num_choices snake_case_ = TFMobileBertForMultipleChoice(config=_UpperCamelCase ) snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case_ = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) snake_case_ = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->str: snake_case_ = self.num_labels snake_case_ = TFMobileBertForTokenClassification(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__( self : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) ->str: snake_case_ = TFMobileBertForQuestionAnswering(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__( self : Union[str, Any] ) ->List[str]: snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def snake_case__( self : Tuple ) ->Optional[Any]: snake_case_ = 
TFMobileBertModelTest.TFMobileBertModelTester(self ) snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def snake_case__( self : Optional[Any] ) ->Any: self.config_tester.run_common_tests() def snake_case__( self : str ) ->Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCamelCase ) def snake_case__( self : List[str] ) ->Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCamelCase ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCamelCase ) def snake_case__( self : Tuple ) ->Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCamelCase ) def snake_case__( self : List[str] ) ->Union[str, Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCamelCase ) def snake_case__( self : Optional[Any] ) ->Union[str, Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCamelCase ) @slow def snake_case__( self : List[Any] ) ->int: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: snake_case_ = TFMobileBertModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @require_tf class snake_case_ ( unittest.TestCase ): '''simple docstring''' @slow def snake_case__( self : Tuple ) ->List[Any]: snake_case_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) snake_case_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) snake_case_ = model(_UpperCamelCase )[0] snake_case_ = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , _UpperCamelCase ) snake_case_ = tf.constant( [ [ [-4.5919547, -9.248295, -9.645256], [-6.7306175, -6.440284, -6.6052837], [-7.2743506, -6.7847915, -6.024673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 )
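# ---------------------------------------------------------------------------
# Minimal inference sketch for the checkpoint used in the slow test above.
from transformers import AutoTokenizer, TFMobileBertForPreTraining

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("hello world", return_tensors="tf")
outputs = model(**inputs)
print(outputs.prediction_logits.shape)  # (1, sequence_length, 30522)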
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def A_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' __SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' ) __SCREAMING_SNAKE_CASE : List[Any] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) __SCREAMING_SNAKE_CASE : Dict = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE ) return image def A_ ( __SCREAMING_SNAKE_CASE : Any ) -> int: if "visual_encoder" in key: __SCREAMING_SNAKE_CASE : List[Any] = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __SCREAMING_SNAKE_CASE ) if "blocks" in key: __SCREAMING_SNAKE_CASE : int = re.sub(R'''blocks''' , '''layers''' , __SCREAMING_SNAKE_CASE ) if "attn" in key: __SCREAMING_SNAKE_CASE : int = re.sub(R'''attn''' , '''self_attn''' , __SCREAMING_SNAKE_CASE ) if "norm1" in key: __SCREAMING_SNAKE_CASE : Optional[int] = re.sub(R'''norm1''' , '''layer_norm1''' , __SCREAMING_SNAKE_CASE ) if "norm2" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(R'''norm2''' , '''layer_norm2''' , __SCREAMING_SNAKE_CASE ) if "encoder.norm" in key: __SCREAMING_SNAKE_CASE : int = re.sub(R'''encoder.norm''' , '''post_layernorm''' , __SCREAMING_SNAKE_CASE ) if "encoder.patch_embed.proj" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __SCREAMING_SNAKE_CASE ) if "encoder.pos_embed" in key: __SCREAMING_SNAKE_CASE : Tuple = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __SCREAMING_SNAKE_CASE ) if "encoder.cls_token" in key: __SCREAMING_SNAKE_CASE : List[str] = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , __SCREAMING_SNAKE_CASE ) if "self_attn" in key: __SCREAMING_SNAKE_CASE : List[Any] = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , __SCREAMING_SNAKE_CASE ) return key @torch.no_grad() def A_ ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=None ) -> Optional[Any]: if config_path is not None: __SCREAMING_SNAKE_CASE : Optional[int] = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE : List[str] = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) __SCREAMING_SNAKE_CASE : int = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval() __SCREAMING_SNAKE_CASE : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' __SCREAMING_SNAKE_CASE : Any = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=3_84 , vit='''base''' ) __SCREAMING_SNAKE_CASE : int = pt_model.eval() __SCREAMING_SNAKE_CASE : Optional[Any] = pt_model.state_dict() for 
key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : Union[str, Any] = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : List[str] = rename_key(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Optional[int] = value hf_model.load_state_dict(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Optional[Any] = 3_84 __SCREAMING_SNAKE_CASE : List[Any] = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device='''cpu''' ) __SCREAMING_SNAKE_CASE : List[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer(['''a picture of'''] ).input_ids __SCREAMING_SNAKE_CASE : List[Any] = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] __SCREAMING_SNAKE_CASE : List[str] = hf_model.generate(__SCREAMING_SNAKE_CASE ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(__SCREAMING_SNAKE_CASE ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __SCREAMING_SNAKE_CASE : Optional[int] = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='''base''' ) vqa_model.eval() __SCREAMING_SNAKE_CASE : List[Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : Dict = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : List[str] = rename_key(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Optional[Any] = value __SCREAMING_SNAKE_CASE : Optional[Any] = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE ) hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Optional[Any] = ['''How many dogs are in this image?'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE : Optional[int] = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) __SCREAMING_SNAKE_CASE : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' __SCREAMING_SNAKE_CASE : Any = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit='''base''' ) itm_model.eval() __SCREAMING_SNAKE_CASE : Union[str, Any] = itm_model.state_dict() for key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : Any = modified_state_dict.pop(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : int = rename_key(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Tuple = value __SCREAMING_SNAKE_CASE : Union[str, Any] = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : List[Any] = ['''A picture of a woman with a dog sitting in a beach'''] __SCREAMING_SNAKE_CASE : Any = tokenizer( __SCREAMING_SNAKE_CASE , return_tensors='''pt''' , padding='''max_length''' , truncation=__SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE ) hf_itm_model.eval() 
__SCREAMING_SNAKE_CASE : Dict = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE : Optional[Any] = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") _A = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
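# ---------------------------------------------------------------------------
# Minimal captioning sketch with a converted BLIP checkpoint. The Hub id is
# the official release, not necessarily the folder this script writes out.
import requests
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

inputs = processor(images=image, text="a picture of", return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))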
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @property def snake_case_ ( self : Any ): torch.manual_seed(0 ) __lowercase : int = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def snake_case_ ( self : Optional[Any] ): __lowercase : Optional[Any] = self.dummy_uncond_unet __lowercase : str = KarrasVeScheduler() __lowercase : int = KarrasVePipeline(unet=_snake_case , scheduler=_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : Optional[int] = torch.manual_seed(0 ) __lowercase : int = pipe(num_inference_steps=2 , generator=_snake_case , output_type='''numpy''' ).images __lowercase : Optional[int] = torch.manual_seed(0 ) __lowercase : int = pipe(num_inference_steps=2 , generator=_snake_case , output_type='''numpy''' , return_dict=_snake_case )[0] __lowercase : Optional[Any] = image[0, -3:, -3:, -1] __lowercase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowercase : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self : str ): __lowercase : Dict = '''google/ncsnpp-celebahq-256''' __lowercase : str = UNetaDModel.from_pretrained(_snake_case ) __lowercase : List[Any] = KarrasVeScheduler() __lowercase : List[Any] = KarrasVePipeline(unet=_snake_case , scheduler=_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : str = torch.manual_seed(0 ) __lowercase : Optional[int] = pipe(num_inference_steps=20 , generator=_snake_case , output_type='''numpy''' ).images __lowercase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __lowercase : Union[str, Any] = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
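# ---------------------------------------------------------------------------
# Minimal generation sketch mirroring the slow test above, assuming diffusers'
# UNet2DModel class and the google/ncsnpp-celebahq-256 checkpoint it loads.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)

generator = torch.manual_seed(0)
image = pipe(num_inference_steps=20, generator=generator).images[0]
image.save("karras_ve_sample.png")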
import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __lowerCAmelCase ( lowerCAmelCase_ ): """simple docstring""" def snake_case_ ( self : Optional[int] ): __lowercase : Dict = tempfile.mkdtemp() __lowercase : Tuple = 8 # DPR tok __lowercase : Optional[int] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __lowercase : Optional[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(_snake_case , exist_ok=_snake_case ) __lowercase : str = os.path.join(_snake_case , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok __lowercase : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowercase : Optional[Any] = dict(zip(_snake_case , range(len(_snake_case ) ) ) ) __lowercase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowercase : Any = {'''unk_token''': '''<unk>'''} __lowercase : int = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(_snake_case , exist_ok=_snake_case ) __lowercase : List[Any] = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Tuple = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_snake_case ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(_snake_case ) ) def snake_case_ ( self : List[Any] ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def snake_case_ ( self : Dict ): return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def snake_case_ ( self : Optional[int] ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def snake_case_ ( self : str ): shutil.rmtree(self.tmpdirname ) def snake_case_ ( self : Tuple ): __lowercase : Dict = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], 
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def snake_case_ ( self : Union[str, Any] ): __lowercase : Union[str, Any] = self.get_dummy_dataset() __lowercase : Optional[int] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , ) with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: __lowercase : List[Any] = dataset __lowercase : str = RagRetriever( _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) return retriever def snake_case_ ( self : int , _snake_case : bool ): __lowercase : Dict = self.get_dummy_dataset() __lowercase : List[Any] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , ) if from_disk: __lowercase : List[Any] = os.path.join(self.tmpdirname , '''dataset''' ) __lowercase : Union[str, Any] = os.path.join(self.tmpdirname , '''index.faiss''' ) dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) ) dataset.drop_index('''embeddings''' ) dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) ) del dataset __lowercase : Optional[Any] = RagRetriever( _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , ) else: __lowercase : Optional[Any] = RagRetriever( _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _snake_case ) , ) return retriever def snake_case_ ( self : str ): __lowercase : List[str] = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''text''': ['''foo''', '''bar'''], '''title''': ['''Foo''', '''Bar'''], '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT ) __lowercase : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' ) dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' ) pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) ) __lowercase : str = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' ) __lowercase : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset} pickle.dump(_snake_case , open(_snake_case , '''wb''' ) ) __lowercase : List[str] = RagConfig( retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , ) __lowercase : Optional[int] = RagRetriever( _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() ) return retriever def snake_case_ ( self : Optional[Any] ): __lowercase : List[str] = 1 __lowercase : Tuple = self.get_dummy_canonical_hf_index_retriever() __lowercase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase , __lowercase , __lowercase : str = 
retriever.retrieve(_snake_case , n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case_ ( self : int ): __lowercase : int = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset: __lowercase : Optional[Any] = self.get_dummy_dataset() retriever.save_pretrained(_snake_case ) __lowercase : str = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case , _snake_case ) __lowercase : List[str] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=1 ) self.assertTrue(out is not None ) def snake_case_ ( self : str ): __lowercase : List[str] = 1 __lowercase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) __lowercase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case_ ( self : Any ): __lowercase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) __lowercase : Optional[Any] = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case , _snake_case ) __lowercase : Optional[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : List[Any] = retriever.retrieve(_snake_case , n_docs=1 ) self.assertTrue(out is not None ) def snake_case_ ( self : List[Any] ): __lowercase : Any = 1 __lowercase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) __lowercase : Any = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase , __lowercase , __lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case ) self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner 
product is reached with second doc self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case_ ( self : Any ): __lowercase : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) __lowercase : Optional[int] = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case , _snake_case ) __lowercase : Union[str, Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 ) self.assertTrue(out is not None ) def snake_case_ ( self : Tuple ): __lowercase : Optional[int] = 1 __lowercase : str = self.get_dummy_legacy_index_retriever() __lowercase : Tuple = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(_snake_case ) , 2 ) self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] ) self.assertEqual(len(doc_dicts[0]['''text'''] ) , _snake_case ) self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() , [[1], [0]] ) def snake_case_ ( self : Union[str, Any] ): __lowercase : Tuple = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(_snake_case ) __lowercase : Tuple = RagRetriever.from_pretrained(_snake_case ) self.assertIsInstance(_snake_case , _snake_case ) __lowercase : Optional[int] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def snake_case_ ( self : Optional[Any] ): import torch __lowercase : Tuple = 1 __lowercase : Any = self.get_dummy_canonical_hf_index_retriever() __lowercase : str = [[5, 7], [10, 11]] __lowercase : List[Any] = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case ) __lowercase , __lowercase , __lowercase : Tuple = ( out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(_snake_case , _snake_case ) self.assertIsInstance(_snake_case , _snake_case ) self.assertIsInstance(_snake_case , np.ndarray ) __lowercase : Optional[Any] = retriever( _snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case , return_tensors='''pt''' , ) __lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = ( # noqa: F841 out['''context_input_ids'''], out['''context_attention_mask'''], out['''retrieved_doc_embeds'''], out['''doc_ids'''], ) self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) ) 
self.assertIsInstance(_snake_case , torch.Tensor ) self.assertIsInstance(_snake_case , torch.Tensor ) self.assertIsInstance(_snake_case , torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def snake_case_ ( self : List[Any] ): __lowercase : Tuple = self.get_dpr_ctx_encoder_tokenizer() __lowercase : str = 1 __lowercase : int = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case ) retriever.set_ctx_encoder_tokenizer(_snake_case ) __lowercase : Tuple = [[5, 7], [10, 11]] __lowercase : int = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa ) __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case ) self.assertEqual( len(_snake_case ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _snake_case ) # check for doc token related keys in dictionary.
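# Toy illustration of the ranking these tests assert: RAG retrieval scores
# documents by inner product between the question hidden states and the doc
# embeddings, which is why the all-ones query picks doc 1 and the
# all-minus-ones query picks doc 0. (The 4-dim vectors here are
# illustrative, not the actual test fixtures.)
import numpy as np

doc_embeds = np.array([[-1.0] * 4, [1.0] * 4])  # embeddings of docs 0 and 1
queries = np.array([[1.0] * 4, [-1.0] * 4])     # two question hidden states
scores = queries @ doc_embeds.T                 # inner products
print(scores.argmax(axis=1).tolist())           # -> [1, 0]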
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase : Any = get_tests_dir("fixtures/test_sentencepiece.model") __UpperCamelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model") __UpperCamelCase : Tuple = """pt""" if is_torch_available() else """tf""" @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( a_ , unittest.TestCase ): UpperCamelCase__ = CamembertTokenizer UpperCamelCase__ = CamembertTokenizerFast UpperCamelCase__ = True UpperCamelCase__ = True def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing a = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = """<pad>""" a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(UpperCamelCase_ ) , 1004 ) def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def lowerCamelCase__ ( self :List[str] ): '''simple docstring''' a = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) a = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) a = """I was born in 92000, and this is falsé.""" a = tokenizer.encode(UpperCamelCase_ ) a = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) a = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) a = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) # <unk> tokens are not the same for `rust` than for `slow`. 
# Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) a = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) a = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return a = self.get_tokenizer() a = self.get_rust_tokenizer() a = """I was born in 92000, and this is falsé.""" a = tokenizer.tokenize(UpperCamelCase_ ) a = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) a = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) a = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) a = self.get_rust_tokenizer() a = tokenizer.encode(UpperCamelCase_ ) a = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' a = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. a = [ """Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """ """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""", """À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """ """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """ """telles que la traduction et la synthèse de texte.""", ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCamelCase_ , )
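if __name__ == "__main__":
    # Minimal, standalone sketch of the slow/fast parity check exercised in
    # the tests above, run against the public "camembert-base" checkpoint
    # (this downloads the tokenizer files, so it needs network access).
    slow = CamembertTokenizer.from_pretrained("camembert-base")
    fast = CamembertTokenizerFast.from_pretrained("camembert-base")
    sample = "I was born in 92000, and this is falsé."
    assert slow.encode(sample) == fast.encode(sample)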
"""simple docstring""" from maths.prime_check import is_prime def A ( __snake_case: int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): __magic_name__ = F"""Input value of [number={number}] must be an integer""" raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key used when no key is supplied."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt content character by character, returning a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Decrypt content (XOR is its own inverse), returning a list of chars."""
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
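# Live round-trip sanity check for the cipher above (key 67, matching the
# commented-out tests): XOR-ing with the same key twice restores the input.
_cipher = XORCipher()
assert _cipher.decrypt_string(_cipher.encrypt_string("hallo welt", 67), 67) == "hallo welt"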
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        # pick two random positions and swap their elements
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
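# Note: the routine above swaps two uniformly random positions on every pass,
# which is not the classic Fisher-Yates algorithm and does not yield a
# perfectly uniform permutation. For comparison, the textbook (Durstenfeld)
# variant walks from the end and swaps each slot with a random earlier slot:
import random


def fisher_yates_shuffle_uniform(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i, so every permutation is equally likely
        data[i], data[j] = data[j], data[i]
    return data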
"""Rotate an image by mapping three reference points to new positions."""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/shear the image via the affine transform defined by two point triples."""
    rotation_mat = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, rotation_mat, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
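# Illustration of what cv2.getAffineTransform computes: the unique 2x3
# matrix M such that M @ [x, y, 1]^T maps each of three source points to its
# destination. Here we solve for M directly with NumPy instead of OpenCV.
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
A = np.hstack([src, np.ones((3, 1), np.float32)])  # 3x3, rows are [x, y, 1]
M = np.linalg.solve(A, dst).T                      # 2x3 affine matrix
assert np.allclose(A @ M.T, dst)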
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map [-1, 1] tensors to uint8-style [0, 255] HWC arrays for imwatermark
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # map back to [-1, 1] CHW tensors
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
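# The two normalizations above are inverses of each other: diffusion images
# live in [-1, 1] while imwatermark expects [0, 255]. A tiny standalone check:
import torch

x = torch.linspace(-1.0, 1.0, steps=5)
to_255 = 255 * (x / 2 + 0.5)
back = 2 * (to_255 / 255 - 0.5)
assert torch.allclose(x, back)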
from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Dict = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Dict: '''simple docstring''' requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> str: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Optional[Any] = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[Any]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Union[str, Any] = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> int: '''simple docstring''' requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Any = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> str: '''simple docstring''' requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[int]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Optional[Any]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Optional[Any] = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> Any: '''simple docstring''' requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> str: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> Tuple: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) class __lowercase ( metaclass=__lowerCAmelCase ): '''simple docstring''' a : Optional[int] = ["torch", "transformers", "onnx"] def __init__(self ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' 
requires_backends(self ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] ) @classmethod def _UpperCAmelCase (cls ,*_lowerCamelCase ,**_lowerCamelCase ) -> int: '''simple docstring''' requires_backends(cls ,['''torch''', '''transformers''', '''onnx'''] )
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _UpperCamelCase : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=2 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=36 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=6 , lowerCAmelCase__=6 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=10_00 , ) -> str: '''simple docstring''' __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = image_size __lowercase = patch_size __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = coordinate_size __lowercase = shape_size __lowercase = num_labels __lowercase = num_choices __lowercase = scope __lowercase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __lowercase = text_seq_length __lowercase = (image_size // patch_size) ** 2 + 1 __lowercase = self.text_seq_length + self.image_seq_length def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __lowercase = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __lowercase = bbox[i, j, 3] __lowercase = bbox[i, j, 1] __lowercase = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __lowercase = bbox[i, j, 2] __lowercase = bbox[i, j, 0] __lowercase = tmp_coordinate __lowercase = tf.constant(lowerCAmelCase__ ) __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, 
self.image_size] ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.text_seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __lowercase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' __lowercase = TFLayoutLMvaModel(config=lowerCAmelCase__ ) # text + image __lowercase = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , training=lowerCAmelCase__ ) __lowercase = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , training=lowerCAmelCase__ , ) __lowercase = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , training=lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __lowercase = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __lowercase = model({'''pixel_values''': pixel_values} , training=lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' __lowercase = self.num_labels __lowercase = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase__ ) __lowercase = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' __lowercase = self.num_labels __lowercase = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase__ ) __lowercase = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , 
training=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: '''simple docstring''' __lowercase = 2 __lowercase = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ ) __lowercase = model( lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , training=lowerCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = self.prepare_config_and_inputs() ((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs __lowercase = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ): """simple docstring""" __a : Optional[int] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) __a : List[str] = ( {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel} if is_tf_available() else {} ) __a : List[Any] = False __a : List[str] = False __a : str = False def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ) -> dict: '''simple docstring''' __lowercase = copy.deepcopy(lowerCAmelCase__ ) if model_class in get_values(lowerCAmelCase__ ): __lowercase = { k: tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCAmelCase__ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase__ ): __lowercase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase__ ): __lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase__ ): __lowercase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase__ ): __lowercase = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = TFLayoutLMvaModelTester(self ) __lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' __lowercase , __lowercase = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(lowerCAmelCase__ ) if getattr(lowerCAmelCase__ , '''hf_compute_loss''' , lowerCAmelCase__ ): # The number of elements in the loss should be the same as the number of elements in the label __lowercase = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase__ )[0] ] __lowercase = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __lowercase = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = prepared_for_class.pop('''input_ids''' ) __lowercase = model(lowerCAmelCase__ , **lowerCAmelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __lowercase = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: __lowercase = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __lowercase = -1_00 __lowercase = tf.convert_to_tensor(lowerCAmelCase__ ) __lowercase = model(lowerCAmelCase__ , **lowerCAmelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __lowercase = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) __lowercase = model(lowerCAmelCase__ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __lowercase = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) # Get keys that were added with the _prepare_for_class function __lowercase = prepared_for_class.keys() - inputs_dict.keys() __lowercase = inspect.signature(model.call ).parameters __lowercase = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __lowercase = {0: '''input_ids'''} for label_key in label_keys: __lowercase = signature_names.index(lowerCAmelCase__ ) __lowercase = label_key __lowercase = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __lowercase = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __lowercase = prepared_for_class[value] __lowercase = tuple(lowerCAmelCase__ ) # Send to model __lowercase = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' ( ( 
__lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase = type self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @slow def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def UpperCAmelCase ( ): """simple docstring""" __lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=lowerCAmelCase__ , return_tensors='''tf''' ).pixel_values __lowercase = tf.constant([[1, 2]] ) __lowercase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __lowercase = model(input_ids=lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , training=lowerCAmelCase__ ) # verify the logits __lowercase = (1, 1_99, 7_68) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ ) __lowercase = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __a : Union[str, Any] = 1_6 __a : int = 3_2 def UpperCAmelCase ( lowercase , lowercase = 16 ): """simple docstring""" __lowercase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowercase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) __lowercase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase , max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowercase = datasets.map( lowercase , batched=lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowercase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowercase = 16 elif accelerator.mixed_precision != "no": __lowercase = 8 else: __lowercase = None return tokenizer.pad( lowercase , padding='''longest''' , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__lowercase = DataLoader( tokenized_datasets['''train'''] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) __lowercase = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __a : List[Any] = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( lowercase , lowercase ): """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowercase ) == "1": __lowercase = 2 # Initialize accelerator __lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowercase = config['''lr'''] __lowercase = int(config['''num_epochs'''] ) __lowercase = int(config['''seed'''] ) __lowercase = int(config['''batch_size'''] ) __lowercase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation __lowercase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowercase = batch_size // MAX_GPU_BATCH_SIZE __lowercase = MAX_GPU_BATCH_SIZE set_seed(lowercase ) __lowercase , __lowercase = get_dataloaders(lowercase , lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowercase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowercase = model.to(accelerator.device ) # Instantiate optimizer __lowercase = AdamW(params=model.parameters() , lr=lowercase ) # Instantiate scheduler __lowercase = get_linear_schedule_with_warmup( optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __lowercase = model(**lowercase ) __lowercase = outputs.loss __lowercase = loss / gradient_accumulation_steps accelerator.backward(lowercase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() __lowercase = 0 for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __lowercase = model(**lowercase ) __lowercase = outputs.logits.argmax(dim=-1 ) __lowercase , __lowercase = accelerator.gather((predictions, batch['''labels''']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(lowercase ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples __lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen] __lowercase = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=lowercase , references=lowercase , ) __lowercase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , lowercase ) def UpperCAmelCase ( ): """simple docstring""" __lowercase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowercase , default=lowercase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) __lowercase = parser.parse_args() __lowercase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowercase , lowercase ) if __name__ == "__main__": main()
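# Standalone sketch of the last-batch truncation implemented in the eval loop
# above: on N processes the final gathered batch can contain duplicated
# padding samples, so predictions are cut back to the true dataset length.
# (Plain lists stand in for the gathered tensors.)
def truncate_last_batch(gathered, dataset_length, samples_seen):
    return gathered[: dataset_length - samples_seen]


assert truncate_last_batch([1, 2, 3, 4], dataset_length=10, samples_seen=7) == [1, 2, 3]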
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
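# The hash deliberately ignores comment-only lines, so cosmetic edits to the
# packaged builders do not invalidate the cache. Two sources that differ only
# in their comments hash identically:
same_a = _hash_python_lines(["# version 1", "x = 1", "print(x)"])
same_b = _hash_python_lines(["# version 2, comment changed", "x = 1", "print(x)"])
assert same_a == same_b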
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration SCREAMING_SNAKE_CASE__ : List[str] = { """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""", """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""", """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""", """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""", """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""", """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""", """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""", """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""", """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""", """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""", } def _A ( lowerCamelCase ): a__ : Optional[int] = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(lowerCamelCase , lowerCamelCase ) SCREAMING_SNAKE_CASE__ : List[str] = { """blocks""": """layers""", """mlp.0""": """fc1""", """mlp.2""": """fc2""", """mlp_ln""": """final_layer_norm""", """.attn.query""": """.self_attn.q_proj""", """.attn.key""": """.self_attn.k_proj""", """.attn.value""": """.self_attn.v_proj""", """.attn_ln""": """.self_attn_layer_norm""", """.attn.out""": """.self_attn.out_proj""", """.cross_attn.query""": """.encoder_attn.q_proj""", """.cross_attn.key""": """.encoder_attn.k_proj""", """.cross_attn.value""": """.encoder_attn.v_proj""", """.cross_attn_ln""": """.encoder_attn_layer_norm""", """.cross_attn.out""": """.encoder_attn.out_proj""", """decoder.ln.""": """decoder.layer_norm.""", """encoder.ln.""": """encoder.layer_norm.""", """token_embedding""": """embed_tokens""", """encoder.positional_embedding""": """encoder.embed_positions.weight""", """decoder.positional_embedding""": """decoder.embed_positions.weight""", """ln_post""": """layer_norm""", } def _A ( lowerCamelCase ): a__ : Tuple = list(s_dict.keys() ) for key in keys: a__ : Optional[Any] = key for k, v in WHISPER_MAPPING.items(): if k in key: a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase ) print(F"""{key} -> {new_key}""" ) a__ : Dict = s_dict.pop(lowerCamelCase ) return s_dict def _A ( lowerCamelCase ): a__ , a__ : Any = emb.weight.shape a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase ) a__ : Optional[Any] = emb.weight.data return lin_layer def _A ( lowerCamelCase , lowerCamelCase ): os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase ) a__ : Optional[Any] = os.path.basename(lowerCamelCase ) a__ : List[Any] = 
url.split("/" )[-2] a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase ) if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ): raise RuntimeError(F"""{download_target} exists and is not a regular file""" ) if os.path.isfile(lowerCamelCase ): a__ : Any = open(lowerCamelCase , "rb" ).read() if hashlib.shaaaa(lowerCamelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output: with tqdm( total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop: while True: a__ : Optional[Any] = source.read(8192 ) if not buffer: break output.write(lowerCamelCase ) loop.update(len(lowerCamelCase ) ) a__ : Optional[int] = open(lowerCamelCase , "rb" ).read() if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes def _A ( lowerCamelCase , lowerCamelCase ): if ".pt" not in checkpoint_path: a__ : str = _download(_MODELS[checkpoint_path] ) else: a__ : str = torch.load(lowerCamelCase , map_location="cpu" ) a__ : Dict = original_checkpoint["dims"] a__ : Optional[int] = original_checkpoint["model_state_dict"] a__ : Any = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(lowerCamelCase ) rename_keys(lowerCamelCase ) a__ : Optional[Any] = True a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0] a__ : Tuple = WhisperConfig( vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , ) a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase ) a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase ) if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F""" but all the following weights are missing {missing}""" ) if tie_embeds: a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens ) else: a__ : str = proj_out_weights model.save_pretrained(lowerCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() # # Required parameters parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of the number 2**power."""
    n = 2**power
    r = 0
    while n:
        # peel off the last decimal digit and add it to the running total
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
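# Equivalent string-based one-liner, used here to cross-check the digit sum:
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == sum(int(digit) for digit in str(2**15))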
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
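# Quick demo that needs no stdin. A classic aside: the number of piles the
# first phase builds equals the length of a longest increasing subsequence of
# the input, which is why patience sorting shows up in LIS algorithms.
print(patience_sort([5, 1, 4, 2, 3]))  # -> [1, 2, 3, 4, 5]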
544
1
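A quick property check for the patience sort above; this is an added sketch that assumes the `Stack`/`patience_sort` names from the reconstructed snippet and simply compares the result with Python's built-in `sorted` on random inputs.

import random

def check_patience_sort(trials: int = 100) -> None:
    # patience_sort should agree with sorted() on arbitrary integer lists,
    # including the empty list.
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
        assert patience_sort(list(data)) == sorted(data)

check_patience_sort()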
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : str = { "configuration_clap": [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapAudioConfig", "ClapConfig", "ClapTextConfig", ], "processing_clap": ["ClapProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST", "ClapModel", "ClapPreTrainedModel", "ClapTextModel", "ClapTextModelWithProjection", "ClapAudioModel", "ClapAudioModelWithProjection", ] __lowerCAmelCase : Tuple = ["ClapFeatureExtractor"] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
674
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
674
1
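A usage sketch for the `pollard_rho` function above (names as in the reconstruction): with the default seed=2 and step=1 the pseudorandom walk is deterministic, so factoring a small semiprime is reproducible.

composite = 91  # 7 * 13
factor = pollard_rho(composite)
assert factor in (7, 13)
print(f"{composite} = {factor} * {composite // factor}")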
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    # Secant method: iterate x_{n+2} = x_{n+1} - f(x_{n+1}) / slope until the
    # step size drops below 1e-5.
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
246
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
16
0
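A property check for `rayleigh_quotient` above, added as a sketch under the reconstructed names: for a Hermitian matrix the Rayleigh quotient is real and bounded by the extreme eigenvalues, which numpy's `eigvalsh` can verify directly.

import numpy as np

a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1.0], [2.0], [3.0]])
r = rayleigh_quotient(a, v).item()  # 1x1 ndarray -> Python complex
eigs = np.linalg.eigvalsh(a)  # real eigenvalues of the Hermitian matrix
assert abs(r.imag) < 1e-9
assert eigs.min() - 1e-9 <= r.real <= eigs.max() + 1e-9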
def solution(limit: int = 50000000) -> int:
    # Project Euler 87: count numbers below ``limit`` expressible as
    # prime**2 + prime**3 + prime**4.
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            # 16 is the smallest prime fourth power, so anything at or above
            # limit - 16 cannot yield a valid triple.
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
714
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """MIT/ast-finetuned-audioset-10-10-0.4593""": ( """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json""" ), } class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : Tuple ="""audio-spectrogram-transformer""" def __init__( self : List[Any] , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : int=12 , UpperCamelCase : str=12 , UpperCamelCase : Tuple=30_72 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Dict=1e-1_2 , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=True , UpperCamelCase : Any=10 , UpperCamelCase : Optional[int]=10 , UpperCamelCase : int=10_24 , UpperCamelCase : Optional[Any]=1_28 , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) _snake_case : Tuple = hidden_size _snake_case : str = num_hidden_layers _snake_case : Optional[Any] = num_attention_heads _snake_case : Optional[Any] = intermediate_size _snake_case : Optional[Any] = hidden_act _snake_case : List[str] = hidden_dropout_prob _snake_case : Union[str, Any] = attention_probs_dropout_prob _snake_case : Any = initializer_range _snake_case : List[str] = layer_norm_eps _snake_case : int = patch_size _snake_case : List[str] = qkv_bias _snake_case : int = frequency_stride _snake_case : List[Any] = time_stride _snake_case : List[Any] = max_length _snake_case : List[str] = num_mel_bins
669
0
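The Project Euler 87 statement gives exactly four qualifying numbers below fifty (28, 33, 47, 49), which makes a cheap sanity check for the solver above, assuming the reconstructed `solution` name.

assert solution(50) == 4  # 28, 33, 47, 49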
import string


def decrypt(message: str) -> None:
    # Brute-force a Caesar cipher by printing the plaintext for every key.
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
81
"""simple docstring""" def lowerCAmelCase__ ( __magic_name__ = 1_0 ) ->str: if not isinstance(__magic_name__ , __magic_name__ ) or n < 0: raise ValueError("Invalid input" ) __lowercase = 1_0**n __lowercase = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , __magic_name__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
118
0
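As a spot check for the Project Euler 97 helper above (reconstructed `solution` name): the widely cited answer to that problem is 8739992577; treat the constant as an external reference rather than something derivable from this dump.

assert solution() == "8739992577"  # last ten digits of 28433 * 2**7830457 + 1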
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
481
def topological_sort(graph):
    """Kahn's algorithm: print a topological order of ``graph`` or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
481
1
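The Kahn's-algorithm routine above prints rather than raises when scheduling fails; a two-node cycle exercises that branch (assuming the reconstructed `topological_sort` name).

topological_sort({0: [1], 1: [0]})  # prints: Cycle exists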
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
47
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
472
0
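A randomized agreement test for the introsort entry point above (the reconstructed `sort` name); it exercises the whole pipeline of median-of-three partitioning, the heap sort depth fallback, and the insertion sort base case.

import random

for _ in range(50):
    data = [random.uniform(-1000.0, 1000.0) for _ in range(random.randint(0, 200))]
    assert sort(list(data)) == sorted(data)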
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowercase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' a : List[str] = ShapEPipeline a : str = ["prompt"] a : Tuple = ["prompt"] a : Optional[int] = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] a : int = False @property def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' return 32 @property def _UpperCAmelCase (self ) -> Optional[Any]: '''simple docstring''' return 32 @property def _UpperCAmelCase (self ) -> List[str]: '''simple docstring''' return self.time_input_dim * 4 @property def _UpperCAmelCase (self ) -> Optional[Any]: '''simple docstring''' return 8 @property def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) return CLIPTextModelWithProjection(_lowerCamelCase ) @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __lowercase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowercase = PriorTransformer(**_lowerCamelCase ) return model @property def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __lowercase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowercase = ShapERenderer(**_lowerCamelCase ) return model def _UpperCAmelCase (self ) -> Optional[Any]: '''simple docstring''' __lowercase = self.dummy_prior __lowercase = self.dummy_text_encoder __lowercase = self.dummy_tokenizer __lowercase = self.dummy_renderer __lowercase = HeunDiscreteScheduler( beta_schedule='''exp''' ,num_train_timesteps=1024 ,prediction_type='''sample''' ,use_karras_sigmas=_lowerCamelCase ,clip_sample=_lowerCamelCase ,clip_sample_range=1.0 ,) __lowercase = { '''prior''': prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase=0 ) -> Union[str, Any]: 
'''simple docstring''' if str(_lowerCamelCase ).startswith('''mps''' ): __lowercase = torch.manual_seed(_lowerCamelCase ) else: __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) __lowercase = { '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def _UpperCAmelCase (self ) -> Dict: '''simple docstring''' __lowercase = '''cpu''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**_lowerCamelCase ) __lowercase = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowercase = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) __lowercase = output.images[0] __lowercase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowercase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCAmelCase (self ) -> str: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _UpperCAmelCase (self ) -> Optional[int]: '''simple docstring''' __lowercase = torch_device == '''cpu''' __lowercase = True self._test_inference_batch_single_identical( batch_size=2 ,test_max_difference=_lowerCamelCase ,relax_max_difference=_lowerCamelCase ,) def _UpperCAmelCase (self ) -> int: '''simple docstring''' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**_lowerCamelCase ) __lowercase = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowercase = 1 __lowercase = 2 __lowercase = self.get_dummy_inputs(_lowerCamelCase ) for key in inputs.keys(): if key in self.batch_params: __lowercase = batch_size * [inputs[key]] __lowercase = pipe(**_lowerCamelCase ,num_images_per_prompt=_lowerCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase (self ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase (self ) -> str: '''simple docstring''' __lowercase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) __lowercase = ShapEPipeline.from_pretrained('''openai/shap-e''' ) __lowercase = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) __lowercase = torch.Generator(device=_lowerCamelCase ).manual_seed(0 ) __lowercase = pipe( '''a shark''' ,generator=_lowerCamelCase ,guidance_scale=1_5.0 ,num_inference_steps=64 ,frame_size=64 ,output_type='''np''' ,).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_lowerCamelCase ,_lowerCamelCase )
56
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ '''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VanForImageClassification''', '''VanModel''', '''VanPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_van import ( VAN_PRETRAINED_MODEL_ARCHIVE_LIST, VanForImageClassification, VanModel, VanPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
56
1
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Union[Dict, Any]] = None , _UpperCamelCase : Dict=True , _UpperCamelCase : List[str]=2 ) -> List[str]: '''simple docstring''' from .. import __version__ SCREAMING_SNAKE_CASE = take_from SCREAMING_SNAKE_CASE = () if not isinstance(args[0] , _UpperCamelCase ): SCREAMING_SNAKE_CASE = (args,) for attribute, version_name, message in args: if version.parse(version.parse(_UpperCamelCase ).base_version ) >= version.parse(_UpperCamelCase ): raise ValueError( f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" f""" version {__version__} is >= {version_name}""" ) SCREAMING_SNAKE_CASE = None if isinstance(_UpperCamelCase , _UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(_UpperCamelCase ),) SCREAMING_SNAKE_CASE = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(_UpperCamelCase , _UpperCamelCase ): values += (getattr(_UpperCamelCase , _UpperCamelCase ),) SCREAMING_SNAKE_CASE = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: SCREAMING_SNAKE_CASE = f"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: SCREAMING_SNAKE_CASE = warning + ' ' if standard_warn else '' warnings.warn(warning + message , _UpperCamelCase , stacklevel=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) > 0: SCREAMING_SNAKE_CASE = inspect.getouterframes(inspect.currentframe() )[1] SCREAMING_SNAKE_CASE = call_frame.filename SCREAMING_SNAKE_CASE = call_frame.lineno SCREAMING_SNAKE_CASE = call_frame.function SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(_UpperCamelCase ) == 0: return elif len(_UpperCamelCase ) == 1: return values[0] return values
439
import argparse import os import re a_ : List[str] = "src/transformers/models/auto" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict a_ : Optional[Any] = re.compile(R"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict") # re pattern that matches identifiers in mappings a_ : Optional[int] = re.compile(R"\s*\(\s*\"(\S[^\"]+)\"") def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : bool = False ) -> Optional[int]: '''simple docstring''' with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as f: SCREAMING_SNAKE_CASE = f.read() SCREAMING_SNAKE_CASE = content.split('\n' ) SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = 0 while line_idx < len(_UpperCamelCase ): if _re_intro_mapping.search(lines[line_idx] ) is not None: SCREAMING_SNAKE_CASE = len(re.search(R'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(' ' * indent + '(' ): new_lines.append(lines[line_idx] ) line_idx += 1 SCREAMING_SNAKE_CASE = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": SCREAMING_SNAKE_CASE = line_idx while not lines[line_idx].startswith(' ' * indent + ')' ): line_idx += 1 blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers SCREAMING_SNAKE_CASE = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : _re_identifier.search(_UpperCamelCase ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f: f.write('\n'.join(_UpperCamelCase ) ) elif "\n".join(_UpperCamelCase ) != content: return True def __lowerCAmelCase ( _UpperCamelCase : bool = False ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = [os.path.join(_UpperCamelCase , _UpperCamelCase ) for f in os.listdir(_UpperCamelCase ) if f.endswith('.py' )] SCREAMING_SNAKE_CASE = [sort_auto_mapping(_UpperCamelCase , overwrite=_UpperCamelCase ) for fname in fnames] if not overwrite and any(_UpperCamelCase ): SCREAMING_SNAKE_CASE = [f for f, d in zip(_UpperCamelCase , _UpperCamelCase ) if d] raise ValueError( f"""The following files have auto mappings that need sorting: {', '.join(_UpperCamelCase )}. Run `make style` to fix""" ' this.' ) if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") a_ : List[str] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
439
1
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal that appends node values to ``res``."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Build a binary search tree from ``arr`` and read it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
708
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = "▁" UpperCamelCase__ = {"vocab_file": "spiece.model"} UpperCamelCase__ = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } UpperCamelCase__ = { "google/pegasus-xsum": 512, } UpperCamelCase__ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( _a ): snake_case : Union[str, Any] = VOCAB_FILES_NAMES snake_case : str = VOCAB_FILES_NAMES snake_case : Any = PRETRAINED_VOCAB_FILES_MAP snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<pad>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<mask_2>" , __lowerCAmelCase="<mask_1>" , __lowerCAmelCase=None , __lowerCAmelCase=103 , __lowerCAmelCase = None , **__lowerCAmelCase , ): UpperCamelCase__ = offset if additional_special_tokens is not None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError( f"""additional_special_tokens should be of type {type(__lowerCAmelCase )}, but is""" f""" {type(__lowerCAmelCase )}""" ) UpperCamelCase__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(__lowerCAmelCase ) , self.offset - 1 ) ] if len(set(__lowerCAmelCase ) ) != len(__lowerCAmelCase ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) UpperCamelCase__ = additional_special_tokens_extended else: UpperCamelCase__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token_sent=__lowerCAmelCase , offset=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) UpperCamelCase__ = mask_token_sent UpperCamelCase__ = vocab_file UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) # add special tokens to encoder dict UpperCamelCase__ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) UpperCamelCase__ = {v: k for k, v in self.encoder.items()} @property def _lowerCamelCase ( self ): return len(self.sp_model ) + self.offset def _lowerCamelCase ( self ): UpperCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): UpperCamelCase__ = self.__dict__.copy() UpperCamelCase__ = None return state def __setstate__( self , __lowerCAmelCase ): UpperCamelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): UpperCamelCase__ = {} UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self , __lowerCAmelCase ): return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def _lowerCamelCase ( self , __lowerCAmelCase ): if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] UpperCamelCase__ = self.sp_model.piece_to_id(__lowerCAmelCase ) return sp_id + self.offset def _lowerCamelCase ( self , __lowerCAmelCase ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: UpperCamelCase__ = self.sp_model.IdToPiece(index - self.offset ) return token def _lowerCamelCase ( self , __lowerCAmelCase ): UpperCamelCase__ = [] UpperCamelCase__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token UpperCamelCase__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def _lowerCamelCase ( self , __lowerCAmelCase=False ): return 1 def _lowerCamelCase ( self , __lowerCAmelCase ): UpperCamelCase__ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ): if 
already_has_special_tokens: return self._special_token_mask(__lowerCAmelCase ) elif token_ids_a is None: return self._special_token_mask(__lowerCAmelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ): if not os.path.isdir(__lowerCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , """wb""" ) as fi: UpperCamelCase__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
548
0
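One behavior of the tree sort above is worth flagging: `insert` overwrites `self.val` on equality, so duplicate values are silently dropped rather than kept. A two-line check, using the reconstructed names:

assert tree_sort([2, 2, 1]) == [1, 2]  # the duplicate 2 is collapsed
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]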
'''simple docstring''' from abc import ABC, abstractmethod from argparse import ArgumentParser class __snake_case ( snake_case__): """simple docstring""" @staticmethod @abstractmethod def __lowercase ( lowerCamelCase : Dict ) -> Union[str, Any]: raise NotImplementedError() @abstractmethod def __lowercase ( self : Dict ) -> List[Any]: raise NotImplementedError()
275
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { """microsoft/swinv2-tiny-patch4-window8-256""": ( """https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json""" ), } class UpperCAmelCase__ ( snake_case__ ): snake_case_ = '''swinv2''' snake_case_ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , A__=224 , A__=4 , A__=3 , A__=96 , A__=[2, 2, 6, 2] , A__=[3, 6, 12, 24] , A__=7 , A__=4.0 , A__=True , A__=0.0 , A__=0.0 , A__=0.1 , A__="gelu" , A__=False , A__=0.02 , A__=1E-5 , A__=32 , **A__ , ): """simple docstring""" super().__init__(**A__ ) UpperCAmelCase_: List[str] = image_size UpperCAmelCase_: List[str] = patch_size UpperCAmelCase_: str = num_channels UpperCAmelCase_: Optional[int] = embed_dim UpperCAmelCase_: str = depths UpperCAmelCase_: Optional[Any] = len(A__ ) UpperCAmelCase_: Optional[Any] = num_heads UpperCAmelCase_: Dict = window_size UpperCAmelCase_: Dict = mlp_ratio UpperCAmelCase_: Optional[Any] = qkv_bias UpperCAmelCase_: Optional[Any] = hidden_dropout_prob UpperCAmelCase_: Optional[int] = attention_probs_dropout_prob UpperCAmelCase_: int = drop_path_rate UpperCAmelCase_: Union[str, Any] = hidden_act UpperCAmelCase_: Any = use_absolute_embeddings UpperCAmelCase_: Optional[int] = layer_norm_eps UpperCAmelCase_: str = initializer_range UpperCAmelCase_: Tuple = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase_: Union[str, Any] = int(embed_dim * 2 ** (len(A__ ) - 1) ) UpperCAmelCase_: str = (0, 0, 0, 0)
137
0
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) set_seed(770) __lowerCAmelCase = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } __lowerCAmelCase = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } __lowerCAmelCase = os.path.dirname(os.path.abspath(__file__)) __lowerCAmelCase = os.path.join(os.path.expanduser('~'), '.cache') __lowerCAmelCase = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def _UpperCAmelCase ( __A : Dict , __A : List[Any]=False ): a_ : List[Any] = model_type if use_small: key += "_small" return os.path.join(__A , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def _UpperCAmelCase ( __A : List[str] , __A : str ): os.makedirs(__A , exist_ok=__A ) hf_hub_download(repo_id=__A , filename=__A , local_dir=__A ) def _UpperCAmelCase ( __A : Optional[Any] , __A : str , __A : List[Any]=False , __A : str="text" ): if model_type == "text": a_ : Any = BarkSemanticModel a_ : Optional[int] = BarkSemanticConfig a_ : Tuple = BarkSemanticGenerationConfig elif model_type == "coarse": a_ : Union[str, Any] = BarkCoarseModel a_ : Union[str, Any] = BarkCoarseConfig a_ : List[Any] = BarkCoarseGenerationConfig elif model_type == "fine": a_ : List[str] = BarkFineModel a_ : Any = BarkFineConfig a_ : Any = BarkFineGenerationConfig else: raise NotImplementedError() a_ : Optional[Any] = f'{model_type}_small' if use_small else model_type a_ : Tuple = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(__A ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) a_ : Union[str, Any] = torch.load(__A , map_location=__A ) # this is a hack a_ : List[str] = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: a_ : Optional[int] = model_args['''vocab_size'''] a_ : Optional[Any] = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments a_ : List[str] = model_args.pop('''n_head''' ) a_ : List[str] = model_args.pop('''n_embd''' ) a_ : Any = model_args.pop('''n_layer''' ) a_ : Optional[int] = ConfigClass(**checkpoint['''model_args'''] ) a_ : List[Any] = ModelClass(config=__A ) a_ : int = GenerationConfigClass() a_ : Tuple = model_generation_config a_ : Tuple = checkpoint['''model'''] # fixup checkpoint a_ : Any = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(__A ): # replace part of the key with corresponding layer name in HF implementation a_ : Optional[int] = k[len(__A ) :] for old_layer_name in new_layer_name_dict: a_ : Optional[Any] = new_k.replace(__A , new_layer_name_dict[old_layer_name] ) a_ : Union[str, Any] = state_dict.pop(__A ) a_ : Optional[int] = set(state_dict.keys() ) - set(model.state_dict().keys() ) a_ : Optional[int] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} a_ : int = set(model.state_dict().keys() ) - set(state_dict.keys() ) a_ : Tuple = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(__A ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(__A ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(__A , strict=__A ) a_ : Union[str, Any] = model.num_parameters(exclude_embeddings=__A ) a_ : List[str] = checkpoint['''best_val_loss'''].item() logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(__A , 3 )} loss' ) model.eval() model.to(__A ) del checkpoint, state_dict return model def _UpperCAmelCase ( __A : List[str] , __A : Tuple=False , __A : Optional[Any]="text" ): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() a_ : Optional[int] = '''cpu''' # do conversion on cpu a_ : Tuple = _get_ckpt_path(__A , use_small=__A ) a_ : Any = _load_model(__A , __A , model_type=__A , use_small=__A ) # load bark initial model a_ : int = _bark_load_model(__A , '''cpu''' , model_type=__A , use_small=__A ) if model_type == "text": a_ : Dict = bark_model['''model'''] if model.num_parameters(exclude_embeddings=__A ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model a_ : List[str] = 5 a_ : List[Any] = 10 if model_type in ["text", "coarse"]: a_ : Union[str, Any] = torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int ) a_ : Union[str, Any] = bark_model(__A )[0] a_ : List[str] = model(__A ) # take last logits a_ : Optional[Any] = output_new_model_total.logits[:, [-1], :] else: a_ : Tuple = 3 a_ : Tuple = 8 a_ : Any = torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) a_ : int = model(__A , __A ) a_ : List[Any] = bark_model(__A , __A ) a_ : Optional[int] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError('''initial and new 
outputs are not equal''' ) Path(__A ).mkdir(exist_ok=__A ) model.save_pretrained(__A ) def _UpperCAmelCase ( __A : Dict , __A : Optional[Any] , __A : Tuple , __A : Dict , __A : Tuple , __A : int , ): a_ : Union[str, Any] = os.path.join(__A , __A ) a_ : Tuple = BarkSemanticConfig.from_pretrained(os.path.join(__A , '''config.json''' ) ) a_ : str = BarkCoarseConfig.from_pretrained(os.path.join(__A , '''config.json''' ) ) a_ : Dict = BarkFineConfig.from_pretrained(os.path.join(__A , '''config.json''' ) ) a_ : Union[str, Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) a_ : List[str] = BarkSemanticModel.from_pretrained(__A ) a_ : List[str] = BarkCoarseModel.from_pretrained(__A ) a_ : str = BarkFineModel.from_pretrained(__A ) a_ : Optional[int] = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) a_ : Tuple = BarkConfig.from_sub_model_configs( __A , __A , __A , __A ) a_ : Dict = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) a_ : List[Any] = BarkModel(__A ) a_ : str = semantic a_ : Any = coarseAcoustic a_ : List[str] = fineAcoustic a_ : Dict = codec a_ : Tuple = bark_generation_config Path(__A ).mkdir(exist_ok=__A ) bark.save_pretrained(__A , repo_id=__A , push_to_hub=__A ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') __lowerCAmelCase = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
666
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): snake_case__ = IFInpaintingSuperResolutionPipeline snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) snake_case__ = PipelineTesterMixin.required_optional_params - {"latents"} def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: return self._get_superresolution_dummy_components() def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[Any]: if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ): a_ : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: a_ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) a_ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) a_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) a_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) a_ : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def SCREAMING_SNAKE_CASE ( self : int ) -> int: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self._test_save_load_local() def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
666
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ :List[str] = logging.get_logger(__name__) lowerCAmelCase__ :Optional[Any] = { '''microsoft/beit-base-patch16-224-pt22k''': ( '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json''' ), # See all BEiT models at https://huggingface.co/models?filter=beit } class __a ( UpperCAmelCase ): _a : str = 'beit' def __init__( self , _SCREAMING_SNAKE_CASE=8192 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=224 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[3, 5, 7, 11] , _SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.4 , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=255 , **_SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = use_mask_token _UpperCAmelCase = use_absolute_position_embeddings _UpperCAmelCase = use_relative_position_bias _UpperCAmelCase = use_shared_relative_position_bias _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = use_mean_pooling # decode head attributes (semantic segmentation) _UpperCAmelCase = out_indices _UpperCAmelCase = pool_scales # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = semantic_loss_ignore_index class __a ( UpperCAmelCase ): _a : Union[str, Any] = version.parse('1.11' ) @property def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def UpperCAmelCase__ ( self ) -> float: """simple docstring""" return 1e-4
618
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
618
1
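A randomized check for the circle sort above (reconstructed `circle_sort` name), covering the empty and single-element edge cases as well.

import random

for _ in range(100):
    data = [random.randint(-99, 99) for _ in range(random.randint(0, 25))]
    assert circle_sort(list(data)) == sorted(data)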
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration __UpperCAmelCase = 500_000 __UpperCAmelCase , __UpperCAmelCase = os.path.split(__file__) __UpperCAmelCase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def _snake_case ( A , **A ) -> int: lowerCAmelCase__ = dataset.map(**A ) @get_duration def _snake_case ( A , **A ) -> Union[str, Any]: lowerCAmelCase__ = dataset.filter(**A ) def _snake_case ( ) -> Any: lowerCAmelCase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} ) lowerCAmelCase__ = generate_example_dataset( os.path.join(A , '''dataset.arrow''' ) , A , num_examples=A ) lowerCAmelCase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=A ) def tokenize(A ): return tokenizer(examples['''text'''] ) lowerCAmelCase__ = map(A ) lowerCAmelCase__ = map(A , batched=A ) lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''numpy''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''pandas''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''torch''' , columns='''numbers''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ): lowerCAmelCase__ = map(A , function=lambda A : None , batched=A ) lowerCAmelCase__ = map(A , function=A , batched=A ) lowerCAmelCase__ = filter(A ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(A , '''wb''' ) as f: f.write(json.dumps(A ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
719
import jax.numpy as jnp

from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
98
0
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger a_ : Optional[Any] = get_logger(__name__) a_ : Dict = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class _snake_case : @add_start_docstrings(a) def __call__( self , a , a) -> jnp.ndarray: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''') class _snake_case : @add_start_docstrings(a) def __call__( self , a , a) -> jnp.ndarray: raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''') class _snake_case ( A__ ): @add_start_docstrings(a) def __call__( self , a , a , a , **a) -> jnp.ndarray: for processor in self: SCREAMING_SNAKE_CASE = inspect.signature(processor.__call__).parameters if len(a) > 3: if not all(arg in kwargs for arg in list(function_args.keys())[2:]): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys())} for ''' f'''{processor.__class__} are passed to the logits processor.''') SCREAMING_SNAKE_CASE = processor(a , a , a , **a) else: SCREAMING_SNAKE_CASE = processor(a , a , a) return scores class _snake_case ( A__ ): def __init__( self , a) -> List[str]: if not isinstance(a , a) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''') SCREAMING_SNAKE_CASE = temperature def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE = scores / self.temperature return scores class _snake_case ( A__ ): def __init__( self , a , a = -float('Inf') , a = 1) -> Optional[Any]: if not isinstance(a , a) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''') if not isinstance(a , a) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''') SCREAMING_SNAKE_CASE = top_p SCREAMING_SNAKE_CASE = filter_value SCREAMING_SNAKE_CASE = min_tokens_to_keep def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = lax.top_k(a , scores.shape[-1]) SCREAMING_SNAKE_CASE = jnp.full_like(a , self.filter_value) SCREAMING_SNAKE_CASE = jax.nn.softmax(a , axis=-1).cumsum(axis=-1) SCREAMING_SNAKE_CASE = cumulative_probs < self.top_p # include the token that is higher than top_p as well SCREAMING_SNAKE_CASE = jnp.roll(a , 1) score_mask |= score_mask.at[:, 0].set(a) # min tokens to keep SCREAMING_SNAKE_CASE = score_mask.at[:, : self.min_tokens_to_keep].set(a) SCREAMING_SNAKE_CASE = jnp.where(a , a , a) SCREAMING_SNAKE_CASE = jax.lax.sort_key_val(a , a)[-1] return next_scores class _snake_case ( A__ ): def __init__( self , a , a = 
-float('Inf') , a = 1) -> int: if not isinstance(a , a) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''') SCREAMING_SNAKE_CASE = max(a , a) SCREAMING_SNAKE_CASE = filter_value def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = scores.shape SCREAMING_SNAKE_CASE = jnp.full(batch_size * vocab_size , self.filter_value) SCREAMING_SNAKE_CASE = min(self.top_k , scores.shape[-1]) # Safety check SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = lax.top_k(a , a) SCREAMING_SNAKE_CASE = jnp.broadcast_to((jnp.arange(a) * vocab_size)[:, None] , (batch_size, topk)).flatten() SCREAMING_SNAKE_CASE = topk_scores.flatten() SCREAMING_SNAKE_CASE = topk_indices.flatten() + shift SCREAMING_SNAKE_CASE = next_scores_flat.at[topk_indices_flat].set(a) SCREAMING_SNAKE_CASE = next_scores_flat.reshape(a , a) return next_scores class _snake_case ( A__ ): def __init__( self , a) -> Any: SCREAMING_SNAKE_CASE = bos_token_id def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE = jnp.full(scores.shape , -float('inf')) SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - 1) SCREAMING_SNAKE_CASE = jnp.where(a , new_scores.at[:, self.bos_token_id].set(0) , a) return scores class _snake_case ( A__ ): def __init__( self , a , a) -> int: SCREAMING_SNAKE_CASE = max_length SCREAMING_SNAKE_CASE = eos_token_id def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE = jnp.full(scores.shape , -float('inf')) SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - self.max_length + 1) SCREAMING_SNAKE_CASE = jnp.where(a , new_scores.at[:, self.eos_token_id].set(0) , a) return scores class _snake_case ( A__ ): def __init__( self , a , a) -> int: if not isinstance(a , a) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''') if not isinstance(a , a) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''') SCREAMING_SNAKE_CASE = min_length SCREAMING_SNAKE_CASE = eos_token_id def __call__( self , a , a , a) -> jnp.ndarray: # create boolean flag to decide if min length penalty should be applied SCREAMING_SNAKE_CASE = 1 - jnp.clip(cur_len - self.min_length , 0 , 1) SCREAMING_SNAKE_CASE = jnp.where(a , scores.at[:, self.eos_token_id].set(-float('inf')) , a) return scores class _snake_case ( A__ ): def __init__( self , a , a) -> Any: SCREAMING_SNAKE_CASE = list(a) SCREAMING_SNAKE_CASE = begin_index def __call__( self , a , a , a) -> List[str]: SCREAMING_SNAKE_CASE = 1 - jnp.bool_(cur_len - self.begin_index) SCREAMING_SNAKE_CASE = jnp.where(a , scores.at[:, self.begin_suppress_tokens].set(-float('inf')) , a) return scores class _snake_case ( A__ ): def __init__( self , a) -> Any: SCREAMING_SNAKE_CASE = list(a) def __call__( self , a , a , a) -> jnp.ndarray: SCREAMING_SNAKE_CASE = scores.at[..., self.suppress_tokens].set(-float('inf')) return scores class _snake_case ( A__ ): def __init__( self , a) -> int: SCREAMING_SNAKE_CASE = dict(a) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
SCREAMING_SNAKE_CASE = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1 for index, token in force_token_map.items(): if token is not None: SCREAMING_SNAKE_CASE = force_token_array.at[index].set(a) SCREAMING_SNAKE_CASE = jnp.intaa(a) def __call__( self , a , a , a) -> jnp.ndarray: def _force_token(a): SCREAMING_SNAKE_CASE = scores.shape[0] SCREAMING_SNAKE_CASE = self.force_token_array[generation_idx] SCREAMING_SNAKE_CASE = jnp.ones_like(a , dtype=scores.dtype) * -float('inf') SCREAMING_SNAKE_CASE = jnp.zeros((batch_size, 1) , dtype=scores.dtype) SCREAMING_SNAKE_CASE = lax.dynamic_update_slice(a , a , (0, current_token)) return new_scores SCREAMING_SNAKE_CASE = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(a) , lambda: scores , ) , ) return scores class _snake_case ( A__ ): def __init__( self , a , a , a) -> List[str]: SCREAMING_SNAKE_CASE = generate_config.eos_token_id SCREAMING_SNAKE_CASE = generate_config.no_timestamps_token_id SCREAMING_SNAKE_CASE = generate_config.no_timestamps_token_id + 1 SCREAMING_SNAKE_CASE = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(a , 'max_initial_timestamp_index'): SCREAMING_SNAKE_CASE = generate_config.max_initial_timestamp_index else: SCREAMING_SNAKE_CASE = model_config.vocab_size if self.max_initial_timestamp_index is None: SCREAMING_SNAKE_CASE = model_config.vocab_size def __call__( self , a , a , a) -> Tuple: # suppress <|notimestamps|> which is handled by without_timestamps SCREAMING_SNAKE_CASE = scores.at[:, self.no_timestamps_token_id].set(-float('inf')) def handle_pairs(a , a): SCREAMING_SNAKE_CASE = jnp.where((cur_len - self.begin_index) >= 1 , a , a) SCREAMING_SNAKE_CASE = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , a , ) SCREAMING_SNAKE_CASE = jnp.where((cur_len - self.begin_index) < 2 , a , a) SCREAMING_SNAKE_CASE = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , a , a , ) return jnp.where( a , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf')) , scores_k.at[: self.eos_token_id].set(-float('inf')) , ) , a , ) SCREAMING_SNAKE_CASE = jax.vmap(a)(a , a) SCREAMING_SNAKE_CASE = jnp.where(cur_len == self.begin_index , a , a) SCREAMING_SNAKE_CASE = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , a , ) SCREAMING_SNAKE_CASE = self.timestamp_begin + self.max_initial_timestamp_index SCREAMING_SNAKE_CASE = jnp.where( a , scores.at[:, last_allowed + 1 :].set(-float('inf')) , a , ) # if sum of probability over timestamps is above any other token, sample timestamp SCREAMING_SNAKE_CASE = jax.nn.log_softmax(a , axis=-1) def handle_cumulative_probs(a , a): SCREAMING_SNAKE_CASE = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1) SCREAMING_SNAKE_CASE = jnp.max(logprobs_k[: self.timestamp_begin]) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf')) , a , ) SCREAMING_SNAKE_CASE = jax.vmap(a)(a , a) return scores
73
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase_ : List[str] = 16 UpperCAmelCase_ : str = 32 def _A (__a , __a , __a , __a , __a = 16 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained('''bert-base-cased''' ) SCREAMING_SNAKE_CASE_ : Any = DatasetDict( { '''train''': dataset['''train'''].select(__a ), '''validation''': dataset['''train'''].select(__a ), '''test''': dataset['''validation'''], } ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE_ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE_ : Any = datasets.map( __a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE_ : List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE_ : Any = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE_ : Optional[int] = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE_ : List[Any] = 8 else: SCREAMING_SNAKE_CASE_ : List[str] = None return tokenizer.pad( __a , padding='''longest''' , max_length=__a , pad_to_multiple_of=__a , return_tensors='''pt''' , ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE_ : int = DataLoader( tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader( tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) SCREAMING_SNAKE_CASE_ : str = DataLoader( tokenized_datasets['''test'''] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader, test_dataloader def _A (__a , __a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = [] # Download the dataset SCREAMING_SNAKE_CASE_ : List[Any] = load_dataset('''glue''' , '''mrpc''' ) # Create our splits SCREAMING_SNAKE_CASE_ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator SCREAMING_SNAKE_CASE_ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE_ : Optional[Any] = config['''lr'''] SCREAMING_SNAKE_CASE_ : List[Any] = int(config['''num_epochs'''] ) SCREAMING_SNAKE_CASE_ : List[str] = int(config['''seed'''] ) SCREAMING_SNAKE_CASE_ : Tuple = int(config['''batch_size'''] ) SCREAMING_SNAKE_CASE_ : Any = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE_ : Dict = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE_ : Tuple = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE_ : Optional[int] = MAX_GPU_BATCH_SIZE set_seed(__a ) # New Code # # Create our folds: SCREAMING_SNAKE_CASE_ : Union[str, Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) SCREAMING_SNAKE_CASE_ : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__a ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_fold_dataloaders( __a , __a , __a , __a , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE_ : Tuple = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE_ : Union[str, Any] = AdamW(params=model.parameters() , lr=__a ) # Instantiate scheduler SCREAMING_SNAKE_CASE_ : Dict = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=1_00 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare( __a , __a , __a , __a , __a ) # Now we train the model for epoch in range(__a ): model.train() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE_ : Optional[int] = model(**__a ) SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.loss SCREAMING_SNAKE_CASE_ : Dict = loss / gradient_accumulation_steps accelerator.backward(__a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : str = model(**__a ) SCREAMING_SNAKE_CASE_ : Any = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__a , references=__a , ) SCREAMING_SNAKE_CASE_ : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , __a ) # New Code # # We also run predictions on the test set at the very end SCREAMING_SNAKE_CASE_ : Any = [] for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(**__a ) SCREAMING_SNAKE_CASE_ : int = outputs.logits SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(__a , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: SCREAMING_SNAKE_CASE_ : str = torch.cat(__a , dim=0 ) SCREAMING_SNAKE_CASE_ : Tuple = torch.stack(__a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) SCREAMING_SNAKE_CASE_ : Tuple = metric.compute(predictions=__a , references=__a ) accelerator.print('''Average test metrics from all folds:''' , __a ) def _A () -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__a , default=__a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=__a , default=3 , help='''The number of splits to perform across the dataset''' ) SCREAMING_SNAKE_CASE_ : List[str] = parser.parse_args() SCREAMING_SNAKE_CASE_ : int = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__a , __a ) if __name__ == "__main__": main()
512
0
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_A ) class a ( _A ): UpperCAmelCase_ : List[Any] =field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True} ) UpperCAmelCase_ : Union[str, Any] =Features({"question": Value("string" ), "context": Value("string" )} ) UpperCAmelCase_ : Dict =Features( { "answers": Sequence( { "text": Value("string" ), "answer_start": Value("int32" ), } ) } ) UpperCAmelCase_ : int ="question" UpperCAmelCase_ : List[str] ="context" UpperCAmelCase_ : Optional[int] ="answers" @property def UpperCamelCase_ ( self ): return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
717
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time _UpperCamelCase : Optional[Any] = Lock() def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[Any] ): '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__snake_case ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowercase = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowercase = min(__snake_case , __snake_case ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__snake_case ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowercase = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowercase = max(__snake_case , __snake_case ) # after all swaps are performed, send the values back to main result_pipe[1].send(__snake_case ) def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] ): '''simple docstring''' lowercase = [] lowercase = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowercase = Pipe() lowercase = Pipe() process_array_.append( Process( target=__snake_case , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowercase = temp_rs lowercase = temp_rr for i in range(1 , len(__snake_case ) - 1 ): lowercase = Pipe() lowercase = Pipe() process_array_.append( Process( target=__snake_case , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowercase = temp_rs lowercase = temp_rr process_array_.append( Process( target=__snake_case , args=( len(__snake_case ) - 1, arr[len(__snake_case ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__snake_case ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__snake_case ) ): lowercase = result_pipe[p][0].recv() process_array_[p].join() return arr def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowercase = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*__snake_case ) lowercase = odd_even_transposition(__snake_case ) print('Sorted List\n' ) print(*__snake_case ) if __name__ == "__main__": main()
134
0
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
120
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
24
0
from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy snake_case = logging.get_logger(__name__) class __A ( snake_case__ ): '''simple docstring''' def __init__( self , _snake_case , _snake_case , _snake_case , **_snake_case ): _lowerCAmelCase : Union[str, Any] = feature_size _lowerCAmelCase : Tuple = sampling_rate _lowerCAmelCase : int = padding_value _lowerCAmelCase : Union[str, Any] = kwargs.pop("padding_side" , "right" ) _lowerCAmelCase : Optional[int] = kwargs.pop("return_attention_mask" , _snake_case ) super().__init__(**_snake_case ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = True , _snake_case = None , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(_snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): _lowerCAmelCase : List[Any] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F""" to this method that includes {self.model_input_names[0]}, but you provided""" F""" {list(processed_features.keys() )}""" ) _lowerCAmelCase : Any = processed_features[self.model_input_names[0]] _lowerCAmelCase : str = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(_snake_case ) == 0: if return_attention_mask: _lowerCAmelCase : str = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch _lowerCAmelCase : Any = required_input[0] if isinstance(_snake_case , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. _lowerCAmelCase : Union[str, Any] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(_snake_case ): _lowerCAmelCase : List[Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(_snake_case ): _lowerCAmelCase : Tuple = "tf" elif is_torch_tensor(_snake_case ): _lowerCAmelCase : Any = "pt" elif isinstance(_snake_case , (int, float, list, tuple, np.ndarray) ): _lowerCAmelCase : Any = "np" else: raise ValueError( F"""type of {first_element} unknown: {type(_snake_case )}. """ "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): _lowerCAmelCase : Dict = to_numpy(_snake_case ) else: _lowerCAmelCase : Dict = [to_numpy(_snake_case ) for v in value] # Convert padding_strategy in PaddingStrategy _lowerCAmelCase : Optional[int] = self._get_padding_strategies(padding=_snake_case , max_length=_snake_case ) _lowerCAmelCase : Union[str, Any] = processed_features[self.model_input_names[0]] _lowerCAmelCase : Optional[Any] = len(_snake_case ) if not all(len(_snake_case ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) _lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): _lowerCAmelCase : Optional[Any] = {k: v[i] for k, v in processed_features.items()} # truncation _lowerCAmelCase : Dict = self._truncate( _snake_case , max_length=_snake_case , pad_to_multiple_of=_snake_case , truncation=_snake_case , ) truncated_inputs.append(_snake_case ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length _lowerCAmelCase : Tuple = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) _lowerCAmelCase : List[Any] = PaddingStrategy.MAX_LENGTH _lowerCAmelCase : Tuple = {} for i in range(_snake_case ): # padding _lowerCAmelCase : Optional[int] = self._pad( truncated_inputs[i] , max_length=_snake_case , padding_strategy=_snake_case , pad_to_multiple_of=_snake_case , return_attention_mask=_snake_case , ) for key, value in outputs.items(): if key not in batch_outputs: _lowerCAmelCase : Optional[int] = [] if value.dtype is np.dtype(np.floataa ): _lowerCAmelCase : Tuple = value.astype(np.floataa ) batch_outputs[key].append(_snake_case ) return BatchFeature(_snake_case , tensor_type=_snake_case ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None , _snake_case = PaddingStrategy.DO_NOT_PAD , _snake_case = None , _snake_case = None , ): _lowerCAmelCase : Dict = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: _lowerCAmelCase : str = len(_snake_case ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): _lowerCAmelCase : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of _lowerCAmelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_snake_case ) < max_length if return_attention_mask and "attention_mask" not in processed_features: _lowerCAmelCase : List[str] = np.ones(len(_snake_case ) , dtype=np.intaa ) if needs_to_be_padded: _lowerCAmelCase : Dict = max_length - len(_snake_case ) if self.padding_side == "right": if return_attention_mask: _lowerCAmelCase : List[str] = np.pad( processed_features["attention_mask"] , (0, difference) ) _lowerCAmelCase : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) _lowerCAmelCase : int = np.pad( _snake_case , _snake_case , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: _lowerCAmelCase : Optional[int] = np.pad( processed_features["attention_mask"] , (difference, 0) ) _lowerCAmelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) _lowerCAmelCase : int = np.pad( _snake_case , _snake_case , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) 
return processed_features def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) _lowerCAmelCase : Any = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): _lowerCAmelCase : Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of _lowerCAmelCase : Optional[Any] = len(_snake_case ) > max_length if needs_to_be_truncated: _lowerCAmelCase : List[Any] = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: _lowerCAmelCase : List[Any] = processed_features["attention_mask"][:max_length] return processed_features def SCREAMING_SNAKE_CASE__ ( self , _snake_case=False , _snake_case=None ): # Get padding strategy if padding is not False: if padding is True: _lowerCAmelCase : str = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(_snake_case , _snake_case ): _lowerCAmelCase : str = PaddingStrategy(_snake_case ) elif isinstance(_snake_case , _snake_case ): _lowerCAmelCase : Any = padding else: _lowerCAmelCase : Tuple = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
587
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def UpperCamelCase_ ( lowerCAmelCase__ ): """simple docstring""" if isinstance(lowerCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_tf class __A : '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ): pass def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case ) _lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(_snake_case ) _lowerCAmelCase : Any = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase : Any = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase : Union[str, Any] = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase : Any = {"vision_model": vision_model, "text_model": text_model} _lowerCAmelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case ) _lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase , _lowerCAmelCase : List[str] = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase : Optional[int] = 
TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase : Tuple = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) _lowerCAmelCase : List[str] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_snake_case ) _lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) _lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case ) _lowerCAmelCase : Optional[int] = after_output[0].numpy() _lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case , 1E-5 ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase : List[str] = model( input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case ) _lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCAmelCase : List[Any] = to_atuple(vision_model.config.image_size ) _lowerCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size ) _lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowerCAmelCase : Any = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowerCAmelCase : int = output.text_model_output.attentions self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case ): _lowerCAmelCase : List[str] = np.abs((a - b) ).max() self.assertLessEqual(_snake_case , _snake_case , F"""Difference between torch and flax is {diff} (>= {tol}).""" ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Any = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Any = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : int = self.prepare_config_and_inputs() self.check_save_load(**_snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : int = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_snake_case ) @slow def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.get_pretrained_model_and_inputs() _lowerCAmelCase : List[str] = model_a(**_snake_case ) _lowerCAmelCase : List[Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_snake_case ) _lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case ) _lowerCAmelCase : List[str] = model_a(**_snake_case ) _lowerCAmelCase : 
Any = after_outputs[0].numpy() _lowerCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_snake_case , 1E-5 ) @require_tf class __A ( snake_case__ ,unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) _lowerCAmelCase : Optional[int] = 13 _lowerCAmelCase : Optional[int] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] ) _lowerCAmelCase : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ): _lowerCAmelCase : Optional[Any] = TFViTModel(_snake_case , name="vision_model" ) _lowerCAmelCase : Union[str, Any] = TFBertModel(_snake_case , name="text_model" ) return vision_model, text_model def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : List[Any] = TFViTModelTester(self ) _lowerCAmelCase : List[str] = TFBertModelTester(self ) _lowerCAmelCase : str = vit_model_tester.prepare_config_and_inputs() _lowerCAmelCase : int = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Dict = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __A ( snake_case__ ,unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
_lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) _lowerCAmelCase : List[Any] = 13 _lowerCAmelCase : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4] ) _lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ): _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case ) _lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case ) _lowerCAmelCase : Tuple = model( input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case ) _lowerCAmelCase : Tuple = output.vision_model_output.attentions self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _lowerCAmelCase : Any = to_atuple(vision_model.config.image_size ) _lowerCAmelCase : List[str] = to_atuple(vision_model.config.patch_size ) _lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _lowerCAmelCase : Dict = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _lowerCAmelCase : str = output.text_model_output.attentions self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ): _lowerCAmelCase : Any = TFDeiTModel(_snake_case , name="vision_model" ) _lowerCAmelCase : int = TFRobertaModel(_snake_case , name="text_model" ) return vision_model, text_model def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Optional[int] = TFDeiTModelTester(self ) _lowerCAmelCase : Union[str, Any] = TFRobertaModelTester(self ) _lowerCAmelCase : Any = vit_model_tester.prepare_config_and_inputs() _lowerCAmelCase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __A ( snake_case__ ,unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) _lowerCAmelCase : List[str] = 13 _lowerCAmelCase : List[Any] = 
floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) _lowerCAmelCase : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) _lowerCAmelCase : Tuple = random_attention_mask([batch_size, 4] ) _lowerCAmelCase : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ): _lowerCAmelCase : Any = TFCLIPVisionModel(_snake_case , name="vision_model" ) _lowerCAmelCase : Any = TFBertModel(_snake_case , name="text_model" ) return vision_model, text_model def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Optional[int] = TFCLIPVisionModelTester(self ) _lowerCAmelCase : Union[str, Any] = TFBertModelTester(self ) _lowerCAmelCase : str = clip_model_tester.prepare_config_and_inputs() _lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase : Tuple = vision_config_and_inputs ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __A ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE__ ( self ): _lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_snake_case ) _lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) _lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _lowerCAmelCase : Optional[int] = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_snake_case , padding=_snake_case , return_tensors="np" ) _lowerCAmelCase : List[Any] = model(**_snake_case ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _lowerCAmelCase : Any = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1E-3 ) )
587
1
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX,
    CONFIG_NAME,
    DISABLE_TELEMETRY,
    DUMMY_INPUTS,
    DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    FEATURE_EXTRACTOR_NAME,
    FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    SENTENCEPIECE_UNDERLINE,
    SPIECE_UNDERLINE,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    ContextManagers,
    DummyObject,
    EntryNotFoundError,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    TensorType,
    _LazyModule,
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    cached_property,
    copy_func,
    default_cache_path,
    define_sagemaker_information,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    get_torch_version,
    has_file,
    http_user_agent,
    is_apex_available,
    is_bsa_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectrona_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_librosa_available,
    is_offline_mode,
    is_onnx_available,
    is_pandas_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_pyanvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_tensor,
    is_tensorflow_probability_available,
    is_tfaonnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bfaa_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_tfaa_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    replace_return_docstrings,
    requires_backends,
    to_numpy,
    to_py_obj,
    torch_only_method,
)
114
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT MSN model; the defaults mirror sayakpaul/vit-msn-base."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
114
1
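# A minimal sketch of using the config class above; the overridden values are
# arbitrary illustration, and every other field falls back to the defaults.
config = ViTMSNConfig(image_size=192, num_hidden_layers=6)
print(config.model_type, config.hidden_size)  # "vit_msn" 768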
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def UpperCamelCase__ ( ): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def UpperCamelCase__ ( ): __lowerCamelCase : Optional[int] = 'mock-s3-bucket' __lowerCamelCase : Tuple = f's3://{mock_bucket}' __lowerCamelCase : List[str] = extract_path_from_uri(SCREAMING_SNAKE_CASE__ ) assert dataset_path.startswith('s3://' ) is False __lowerCamelCase : str = './local/path' __lowerCamelCase : Dict = extract_path_from_uri(SCREAMING_SNAKE_CASE__ ) assert dataset_path == new_dataset_path def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : int = is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) assert is_remote is True __lowerCamelCase : Optional[Any] = fsspec.filesystem('file' ) __lowerCamelCase : Union[str, Any] = is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) assert is_remote is False @pytest.mark.parametrize('compression_fs_class' , SCREAMING_SNAKE_CASE__ ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file} __lowerCamelCase : Optional[int] = input_paths[compression_fs_class.protocol] if input_path is None: __lowerCamelCase : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : List[Any] = os.path.basename(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : List[str] = expected_filename[: expected_filename.rindex('.' 
)] assert fs.glob('*' ) == [expected_filename] with fs.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f, open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('protocol' , ['zip', 'gzip'] ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Any = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path} __lowerCamelCase : Dict = compressed_file_paths[protocol] __lowerCamelCase : Union[str, Any] = 'dataset.jsonl' __lowerCamelCase : Optional[int] = f'{protocol}://{member_file_path}::{compressed_file_path}' __lowerCamelCase , *__lowerCamelCase : str = fsspec.get_fs_token_paths(SCREAMING_SNAKE_CASE__ ) assert fs.isfile(SCREAMING_SNAKE_CASE__ ) assert not fs.isfile('non_existing_' + member_file_path ) @pytest.mark.integration def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Optional[Any] = hf_api.dataset_info(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Any = HfFileSystem(repo_info=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"] assert hffs.isdir('data' ) assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' ) with open(SCREAMING_SNAKE_CASE__ ) as f: assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read() def UpperCamelCase__ ( ): __lowerCamelCase : str = 'bz2' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , clobber=SCREAMING_SNAKE_CASE__ ) with pytest.warns(SCREAMING_SNAKE_CASE__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(SCREAMING_SNAKE_CASE__ ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
230
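# The tests above exercise fsspec's chained-URL syntax ("protocol://member::outer").
# A minimal standalone sketch of the same idea; "archive.zip" and "dataset.jsonl"
# are placeholder names assumed for this illustration.
import fsspec

with fsspec.open("zip://dataset.jsonl::archive.zip", "r", encoding="utf-8") as f:
    first_line = f.readline()  # reads from inside the archive without unpacking it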
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
230
1
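# The _LazyModule above defers submodule imports until an attribute is first
# touched. A minimal sketch of the same effect with plain PEP 562 (module-level
# __getattr__), no transformers helper; the attribute-to-module mapping is
# illustrative and this only works when placed inside a package's __init__.py.
import importlib

_LAZY_ATTRS = {"InstructBlipProcessor": ".processing_instructblip"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")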
def euclidean_gcd(a, b):
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    """Recursive form of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
0
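# Worked example for the Euclidean algorithm defined above: gcd(48, 18).
# 48 = 2*18 + 12, then 18 = 1*12 + 6, then 12 = 2*6 + 0, so the gcd is 6.
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6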
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class a ( unittest.TestCase ): def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict ) -> str: self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) ) for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , delta=__SCREAMING_SNAKE_CASE ) def UpperCamelCase ( self : List[Any] ) -> int: lowerCamelCase_ = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def UpperCamelCase ( self : str ) -> Any: lowerCamelCase_ = None ops.enable_eager_execution_internal() lowerCamelCase_ = tf.config.list_physical_devices('CPU' ) if len(__SCREAMING_SNAKE_CASE ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' ) lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ = GradientAccumulator() lowerCamelCase_ = tf.Variable([4.0, 3.0] ) lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5e-5 , 10 , 5 ) lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=__SCREAMING_SNAKE_CASE ) def accumulate_on_replica(__SCREAMING_SNAKE_CASE : Union[str, Any] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any ): with strategy.scope(): lowerCamelCase_ = strategy.experimental_local_results(__SCREAMING_SNAKE_CASE ) local_variables[0].assign(__SCREAMING_SNAKE_CASE ) local_variables[1].assign(__SCREAMING_SNAKE_CASE ) strategy.run(__SCREAMING_SNAKE_CASE , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(__SCREAMING_SNAKE_CASE ) def _check_local_values(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] ): lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , __SCREAMING_SNAKE_CASE , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
549
0
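# A minimal sketch of the gradient-accumulation behaviour tested above:
# incoming gradients are summed into a buffer until applied, then reset.
import tensorflow as tf

buffer = tf.Variable([0.0, 0.0], trainable=False)  # one buffer per gradient
for grad in [tf.constant([1.0, 2.0]), tf.constant([-2.0, 1.0]), tf.constant([-1.0, 2.0])]:
    buffer.assign_add(grad)
print(buffer.numpy())  # [-2.  5.], the value the test asserts
buffer.assign(tf.zeros_like(buffer))  # reset, as accumulator.reset() does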
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): a_ : Tuple = True from torch.cuda.amp import autocast a_ : int = logging.getLogger(__name__) def __lowerCAmelCase ( _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=None ) -> str: '''simple docstring''' return field(default_factory=lambda: default , metadata=_UpperCamelCase ) @dataclass class UpperCamelCase : __UpperCamelCase =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) __UpperCamelCase =field( default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} ) __UpperCamelCase =field( default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} ) __UpperCamelCase =field( default=0.1 , metadata={ "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler." } , ) __UpperCamelCase =field( default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , ) __UpperCamelCase =field( default=0.05 , metadata={ "help": ( "Propability of each feature vector along the time axis to be chosen as the start of the vector" "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature" "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``." ) } , ) __UpperCamelCase =field(default=0.0 , metadata={"help": "The LayerDrop probability."} ) @dataclass class UpperCamelCase : __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) __UpperCamelCase =field( default="train+validation" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={"help": "The number of processes to use for the preprocessing."} , ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __UpperCamelCase =field( default=SCREAMING_SNAKE_CASE , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) } , ) __UpperCamelCase =list_field( default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , ) @dataclass class UpperCamelCase : __UpperCamelCase =42 __UpperCamelCase =True __UpperCamelCase =None __UpperCamelCase =None __UpperCamelCase =None __UpperCamelCase =None def __call__( self : Optional[Any] , snake_case__ : List[Dict[str, Union[List[int], torch.Tensor]]] ): """simple docstring""" SCREAMING_SNAKE_CASE = [{'input_values': feature['input_values']} for feature in features] SCREAMING_SNAKE_CASE = [{'input_ids': feature['labels']} for feature in features] SCREAMING_SNAKE_CASE = self.processor.pad( snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) SCREAMING_SNAKE_CASE = self.processor.pad( labels=snake_case__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , ) # replace padding with -100 to ignore loss correctly SCREAMING_SNAKE_CASE = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 ) SCREAMING_SNAKE_CASE = labels return batch class UpperCamelCase ( SCREAMING_SNAKE_CASE ): def UpperCamelCase ( self : List[str] , snake_case__ : nn.Module , snake_case__ : Dict[str, Union[torch.Tensor, Any]] ): """simple docstring""" model.train() SCREAMING_SNAKE_CASE = self._prepare_inputs(snake_case__ ) if self.use_amp: with autocast(): SCREAMING_SNAKE_CASE = self.compute_loss(snake_case__ , snake_case__ ) else: SCREAMING_SNAKE_CASE = self.compute_loss(snake_case__ , snake_case__ ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": SCREAMING_SNAKE_CASE = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": SCREAMING_SNAKE_CASE = loss.sum() / (inputs['labels'] >= 0).sum() else: raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" ) if self.args.gradient_accumulation_steps > 1: SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(snake_case__ ).backward() elif self.use_apex: with amp.scale_loss(snake_case__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(snake_case__ ) else: loss.backward() return loss.detach() def __lowerCAmelCase ( ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Detecting last checkpoint. SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' 
) elif last_checkpoint is not None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , _UpperCamelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: SCREAMING_SNAKE_CASE = datasets.load_dataset( 'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name ) SCREAMING_SNAKE_CASE = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' ) # Create and save tokenizer SCREAMING_SNAKE_CASE = f"""[{''.join(data_args.chars_to_ignore )}]""" def remove_special_characters(_UpperCamelCase : Any ): SCREAMING_SNAKE_CASE = re.sub(_UpperCamelCase , '' , batch['sentence'] ).lower() + ' ' return batch SCREAMING_SNAKE_CASE = train_dataset.map(_UpperCamelCase , remove_columns=['sentence'] ) SCREAMING_SNAKE_CASE = eval_dataset.map(_UpperCamelCase , remove_columns=['sentence'] ) def extract_all_chars(_UpperCamelCase : List[Any] ): SCREAMING_SNAKE_CASE = ' '.join(batch['text'] ) SCREAMING_SNAKE_CASE = list(set(_UpperCamelCase ) ) return {"vocab": [vocab], "all_text": [all_text]} SCREAMING_SNAKE_CASE = train_dataset.map( _UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=train_dataset.column_names , ) SCREAMING_SNAKE_CASE = train_dataset.map( _UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=eval_dataset.column_names , ) SCREAMING_SNAKE_CASE = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) ) SCREAMING_SNAKE_CASE = {v: k for k, v in enumerate(_UpperCamelCase )} SCREAMING_SNAKE_CASE = vocab_dict[' '] del vocab_dict[" "] SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) with open('vocab.json' , 'w' ) as vocab_file: json.dump(_UpperCamelCase , _UpperCamelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer( 'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , ) SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase ) SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase ) SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE = min(len(_UpperCamelCase ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE = train_dataset.select(range(_UpperCamelCase ) ) if data_args.max_val_samples is not None: SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_val_samples ) ) SCREAMING_SNAKE_CASE = torchaudio.transforms.Resample(4_80_00 , 1_60_00 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(_UpperCamelCase : Union[str, Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(batch['path'] ) SCREAMING_SNAKE_CASE = resampler(_UpperCamelCase ).squeeze().numpy() SCREAMING_SNAKE_CASE = 1_60_00 SCREAMING_SNAKE_CASE = batch['text'] return batch SCREAMING_SNAKE_CASE = train_dataset.map( _UpperCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) SCREAMING_SNAKE_CASE = eval_dataset.map( _UpperCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(_UpperCamelCase : Union[str, Any] ): # check that all files have the correct sampling rate assert ( len(set(batch['sampling_rate'] ) ) == 1 ), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.""" SCREAMING_SNAKE_CASE = processor( audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] ) batch.update(_UpperCamelCase ) return batch SCREAMING_SNAKE_CASE = train_dataset.map( _UpperCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , ) SCREAMING_SNAKE_CASE = eval_dataset.map( _UpperCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , ) # Metric SCREAMING_SNAKE_CASE = datasets.load_metric('wer' ) def compute_metrics(_UpperCamelCase : Dict ): SCREAMING_SNAKE_CASE = pred.predictions SCREAMING_SNAKE_CASE = np.argmax(_UpperCamelCase , axis=-1 ) SCREAMING_SNAKE_CASE = processor.tokenizer.pad_token_id SCREAMING_SNAKE_CASE = processor.batch_decode(_UpperCamelCase ) # we do not want to group tokens when computing the metrics SCREAMING_SNAKE_CASE = processor.batch_decode(pred.label_ids , group_tokens=_UpperCamelCase ) SCREAMING_SNAKE_CASE = wer_metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase 
) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator SCREAMING_SNAKE_CASE = DataCollatorCTCWithPadding(processor=_UpperCamelCase , padding=_UpperCamelCase ) # Initialize our Trainer SCREAMING_SNAKE_CASE = CTCTrainer( model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , compute_metrics=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # Training if training_args.do_train: if last_checkpoint is not None: SCREAMING_SNAKE_CASE = last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): SCREAMING_SNAKE_CASE = model_args.model_name_or_path else: SCREAMING_SNAKE_CASE = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=_UpperCamelCase ) trainer.save_model() SCREAMING_SNAKE_CASE = train_result.metrics SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase ) ) SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) ) trainer.log_metrics('train' , _UpperCamelCase ) trainer.save_metrics('train' , _UpperCamelCase ) trainer.save_state() # Evaluation SCREAMING_SNAKE_CASE = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) SCREAMING_SNAKE_CASE = trainer.evaluate() SCREAMING_SNAKE_CASE = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCamelCase ) SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) ) trainer.log_metrics('eval' , _UpperCamelCase ) trainer.save_metrics('eval' , _UpperCamelCase ) return results if __name__ == "__main__": main()
704
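# A standalone sketch of the "-100" label-masking trick used by the data
# collator in the script above; the tensors are toy stand-ins for real
# tokenizer output.
import torch

label_ids = torch.tensor([[5, 9, 2, 0, 0]])        # 0 is the pad token id here
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = label_ids.masked_fill(attention_mask.ne(1), -100)
# tensor([[   5,    9,    2, -100, -100]]); loss functions skip the -100 slots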
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
673
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {'vocab_file': 'spm_char.model'} __a = { 'vocab_file': { 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model', 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model', 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model', } } __a = { 'microsoft/speecht5_asr': 1_0_2_4, 'microsoft/speecht5_tts': 1_0_2_4, 'microsoft/speecht5_vc': 1_0_2_4, } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Dict = VOCAB_FILES_NAMES a :Dict = PRETRAINED_VOCAB_FILES_MAP a :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a :str = ['input_ids', 'attention_mask'] def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Tuple="<pad>" , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> None: lowercase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , ) lowercase_ = vocab_file lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE_ ) @property def _lowercase ( self : Optional[Any] ) -> Dict: return self.sp_model.get_piece_size() def _lowercase ( self : Optional[int] ) -> List[Any]: lowercase_ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ) -> List[Any]: lowercase_ = self.__dict__.copy() lowercase_ = None return state def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: lowercase_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase_ = {} lowercase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: lowercase_ = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ ) return token def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: lowercase_ = [] lowercase_ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token lowercase_ = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE_ ) out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) return out_string.strip() def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect 
to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) lowercase_ = [1] if token_ids_a is None: return ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones return ([0] * len(SCREAMING_SNAKE_CASE_ )) + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi: lowercase_ = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
97
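# A minimal round trip through the raw SentencePiece API that the tokenizer
# above wraps; "spm_char.model" is assumed to be a local model file, and the
# exact keyword forms may vary across sentencepiece versions.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="spm_char.model")
pieces = sp.encode("hello world", out_type=str)   # text -> subword pieces
ids = [sp.piece_to_id(p) for p in pieces]         # pieces -> ids, as in _convert_token_to_id
text = sp.decode(pieces)                          # pieces -> text again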
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger() @dataclass class A : __snake_case = 42 __snake_case = field(default_factory=__UpperCAmelCase ) __snake_case = field(default_factory=__UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase__, nn.Convad ) or isinstance(UpperCamelCase__, nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCamelCase__ ) def __call__( self, UpperCamelCase__ ): """simple docstring""" for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCamelCase__ ) [x.remove() for x in self.handles] return self @property def SCREAMING_SNAKE_CASE__ ( self ): """simple docstring""" return list(filter(lambda UpperCamelCase__ : len(list(x.state_dict().keys() ) ) > 0, self.traced ) ) @dataclass class A : __snake_case = 42 __snake_case = 42 __snake_case = 1 __snake_case = field(default_factory=__UpperCAmelCase ) __snake_case = field(default_factory=__UpperCAmelCase ) __snake_case = True def __call__( self, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = Tracker(self.dest )(UpperCamelCase__ ).parametrized lowerCAmelCase_ = Tracker(self.src )(UpperCamelCase__ ).parametrized lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.src_skip, UpperCamelCase__ ) ) lowerCAmelCase_ = list(filter(lambda UpperCamelCase__ : type(UpperCamelCase__ ) not in self.dest_skip, UpperCamelCase__ ) ) if len(UpperCamelCase__ ) != len(UpperCamelCase__ ) and self.raise_if_mismatch: raise Exception( f"Numbers of operations are different. Source module has {len(UpperCamelCase__ )} operations while" f" destination module has {len(UpperCamelCase__ )}." 
) for dest_m, src_m in zip(UpperCamelCase__, UpperCamelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"Transfered from={src_m} to={dest_m}" ) class A ( nn.Module ): def __init__( self, UpperCamelCase__ ): """simple docstring""" super().__init__() lowerCAmelCase_ = [] # - get the stem feature_blocks.append(('''conv1''', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block''' ), f"Unexpected layer name {k}" lowerCAmelCase_ = len(UpperCamelCase__ ) + 1 feature_blocks.append((f"res{block_index}", v) ) lowerCAmelCase_ = nn.ModuleDict(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" return get_trunk_forward_outputs( UpperCamelCase__, out_feat_keys=UpperCamelCase__, feature_blocks=self._feature_blocks, ) class A ( __UpperCAmelCase ): def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ): """simple docstring""" lowerCAmelCase_ = x.split('''-''' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self, UpperCamelCase__ ): """simple docstring""" if x not in self: lowerCAmelCase_ = self.convert_name_to_timm(UpperCamelCase__ ) lowerCAmelCase_ = partial(lambda: (timm.create_model(UpperCamelCase__, pretrained=UpperCamelCase__ ).eval(), None) ) else: lowerCAmelCase_ = super().__getitem__(UpperCamelCase__ ) return val class A ( __UpperCAmelCase ): def __getitem__( self, UpperCamelCase__ ): """simple docstring""" if "seer" in x and "in1k" not in x: lowerCAmelCase_ = RegNetModel else: lowerCAmelCase_ = RegNetForImageClassification return val def __UpperCamelCase ( _A , _A , _A ): for from_key, to_key in keys: lowerCAmelCase_ = from_state_dict[from_key].clone() print(f"Copied key={from_key} to={to_key}" ) return to_state_dict def __UpperCamelCase ( _A , _A , _A , _A , _A , _A = True , ): print(f"Converting {name}..." ) with torch.no_grad(): lowerCAmelCase_ , lowerCAmelCase_ = from_model_func() lowerCAmelCase_ = our_model_func(_A ).eval() lowerCAmelCase_ = ModuleTransfer(src=_A , dest=_A , raise_if_mismatch=_A ) lowerCAmelCase_ = torch.randn((1, 3, 224, 224) ) module_transfer(_A ) if from_state_dict is not None: lowerCAmelCase_ = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: lowerCAmelCase_ = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')] lowerCAmelCase_ = manually_copy_vissl_head(_A , our_model.state_dict() , _A ) our_model.load_state_dict(_A ) lowerCAmelCase_ = our_model(_A , output_hidden_states=_A ) lowerCAmelCase_ = ( our_outputs.logits if isinstance(_A , _A ) else our_outputs.last_hidden_state ) lowerCAmelCase_ = from_model(_A ) lowerCAmelCase_ = from_output[-1] if type(_A ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: lowerCAmelCase_ = our_outputs.hidden_states[-1] assert torch.allclose(_A , _A ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_A , ) lowerCAmelCase_ = 224 if '''seer''' not in name else 384 # we can use the convnext one lowerCAmelCase_ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_A ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_A , ) print(f"Pushed {name}" ) def __UpperCamelCase ( _A , _A = None , _A = True ): lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = 1000 lowerCAmelCase_ = (1, num_labels) lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = num_labels lowerCAmelCase_ = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='''dataset''' ) ) , '''r''' ) ) lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} lowerCAmelCase_ = partial(_A , num_labels=_A , idalabel=_A , labelaid=_A ) lowerCAmelCase_ = { '''regnet-x-002''': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ), '''regnet-x-004''': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ), '''regnet-x-006''': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ), '''regnet-x-008''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ), '''regnet-x-016''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ), '''regnet-x-032''': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ), '''regnet-x-040''': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ), '''regnet-x-064''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ), '''regnet-x-080''': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ), '''regnet-x-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ), '''regnet-x-160''': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ), '''regnet-x-320''': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ), # y variant '''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), '''regnet-y-004''': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), '''regnet-y-006''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), '''regnet-y-008''': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), '''regnet-y-016''': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), '''regnet-y-032''': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), 
'''regnet-y-040''': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), '''regnet-y-064''': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), '''regnet-y-080''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ), '''regnet-y-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), '''regnet-y-160''': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), '''regnet-y-320''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 '''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), '''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), '''regnet-y-1280-seer''': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), '''regnet-y-2560-seer''': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), '''regnet-y-10b-seer''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet '''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), '''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), '''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), '''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), '''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } lowerCAmelCase_ = NameToOurModelFuncMap() lowerCAmelCase_ = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(_A , _A ) -> Tuple[nn.Module, Dict]: lowerCAmelCase_ = torch.hub.load_state_dict_from_url(_A , model_dir=str(_A ) , map_location='''cpu''' ) lowerCAmelCase_ = model_func() # check if we have a head, if yes add it lowerCAmelCase_ = files['''classy_state_dict''']['''base_model''']['''model'''] lowerCAmelCase_ = model_state_dict['''trunk'''] model.load_state_dict(_A ) return model.eval(), model_state_dict["heads"] # pretrained lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper( 
RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) # IN1K finetuned lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) lowerCAmelCase_ = partial( _A , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) if model_name: convert_weight_and_push( _A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _A , _A , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _A , _A , _A , ) return config, expected_shape if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported regnet* architecture,''' ''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) _A = parser.parse_args() _A = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
431
0
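# A minimal sketch of the "Tracker" idea from the conversion script above:
# forward hooks record the leaf modules a model actually executes, in order,
# which is what lets two architectures be zipped together for weight transfer.
import torch
import torch.nn as nn

def trace_leaves(model: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module only
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for h in handles:
        h.remove()
    return traced

print(trace_leaves(nn.Sequential(nn.Linear(4, 4), nn.ReLU()), torch.randn(1, 4)))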
"""simple docstring""" from ...configuration_utils import PretrainedConfig class snake_case ( UpperCAmelCase_ ): '''simple docstring''' _A : Union[str, Any] = 'bert-generation' def __init__( self : str , __lowercase : List[str]=50_358 , __lowercase : str=1_024 , __lowercase : str=24 , __lowercase : Any=16 , __lowercase : Optional[int]=4_096 , __lowercase : Optional[int]="gelu" , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=0.1 , __lowercase : Optional[Any]=512 , __lowercase : List[Any]=0.0_2 , __lowercase : List[str]=1e-12 , __lowercase : List[Any]=0 , __lowercase : int=2 , __lowercase : int=1 , __lowercase : List[Any]="absolute" , __lowercase : Tuple=True , **__lowercase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase ) __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : List[Any] = hidden_size __UpperCAmelCase : Any = num_hidden_layers __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Union[str, Any] = intermediate_size __UpperCAmelCase : str = hidden_dropout_prob __UpperCAmelCase : Any = attention_probs_dropout_prob __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Tuple = layer_norm_eps __UpperCAmelCase : Dict = position_embedding_type __UpperCAmelCase : int = use_cache
707
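# A minimal sketch of using the config class above; the overrides are
# illustrative, and PretrainedConfig supplies JSON round-tripping out of the box.
config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
print(config.to_json_string())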
"""simple docstring""" def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->Tuple: """simple docstring""" return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=0 ) ->Dict: """simple docstring""" return sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[column] ) def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str: """simple docstring""" for i in range(points_counts - 1 ): for j in range(i + 1 , UpperCAmelCase_ ): __UpperCAmelCase : List[Any] = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCAmelCase : Tuple = current_dis return min_dis def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=float('''inf''' ) ) ->str: """simple docstring""" for i in range(min(6 , points_counts - 1 ) , UpperCAmelCase_ ): for j in range(max(0 , i - 6 ) , UpperCAmelCase_ ): __UpperCAmelCase : Dict = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __UpperCAmelCase : Tuple = current_dis return min_dis def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Any: """simple docstring""" if points_counts <= 3: return dis_between_closest_pair(UpperCAmelCase_ , UpperCAmelCase_ ) # recursion __UpperCAmelCase : Any = points_counts // 2 __UpperCAmelCase : Any = closest_pair_of_points_sqr( UpperCAmelCase_ , points_sorted_on_y[:mid] , UpperCAmelCase_ ) __UpperCAmelCase : Tuple = closest_pair_of_points_sqr( UpperCAmelCase_ , points_sorted_on_y[mid:] , points_counts - mid ) __UpperCAmelCase : List[Any] = min(UpperCAmelCase_ , UpperCAmelCase_ ) __UpperCAmelCase : int = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(UpperCAmelCase_ ) __UpperCAmelCase : Union[str, Any] = dis_between_closest_in_strip( UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ ) return min(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]: """simple docstring""" __UpperCAmelCase : str = column_based_sort(UpperCAmelCase_ , column=0 ) __UpperCAmelCase : Any = column_based_sort(UpperCAmelCase_ , column=1 ) return ( closest_pair_of_points_sqr( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ) ** 0.5 if __name__ == "__main__": lowercase__ :Optional[Any] = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)] print('Distance:', closest_pair_of_points(points, len(points)))
374
0
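# Quick sanity check for the divide-and-conquer routine defined above against
# an easily verified answer: the closest pair here is (0, 0)-(1, 1), sqrt(2) apart.
sample = [(0, 0), (3, 4), (1, 1)]
print(closest_pair_of_points(sample, len(sample)))  # ~1.4142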
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale image in place: pixels above the mean become 255, the rest 0."""
    # PIL's .size is (width, height); the swapped names below are compensated
    # for by the pixel-access indexing, so the loops still cover every pixel.
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
489
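# The same mean-threshold binarisation as above, vectorised with NumPy
# (equivalent up to the loop version's integer-floored mean).
import numpy as np
from PIL import Image

arr = np.asarray(Image.open("path_to_image").convert("L"))
binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)
Image.fromarray(binary).save("output_image_path")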
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase__ : def __init__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Union[str, Any]=3_2 , lowerCamelCase : str=3 , lowerCamelCase : Optional[int]=1_0 , lowerCamelCase : List[str]=[1_0, 2_0, 3_0, 4_0] , lowerCamelCase : str=[1, 1, 2, 1] , lowerCamelCase : List[Any]=True , lowerCamelCase : Tuple=True , lowerCamelCase : Any="relu" , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Any=None , ): '''simple docstring''' a__ = parent a__ = batch_size a__ = image_size a__ = num_channels a__ = embeddings_size a__ = hidden_sizes a__ = depths a__ = is_training a__ = use_labels a__ = hidden_act a__ = num_labels a__ = scope a__ = len(lowerCamelCase ) def __a ( self : Dict ): '''simple docstring''' a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ = None if self.use_labels: a__ = ids_tensor([self.batch_size] , self.num_labels ) a__ = self.get_config() return config, pixel_values, labels def __a ( self : Tuple ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __a ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ): '''simple docstring''' a__ = TFRegNetModel(config=lowerCamelCase ) a__ = model(lowerCamelCase , training=lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __a ( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : str ): '''simple docstring''' a__ = self.num_labels a__ = TFRegNetForImageClassification(lowerCamelCase ) a__ = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : Dict ): '''simple docstring''' a__ = self.prepare_config_and_inputs() a__ , a__ , a__ = config_and_inputs a__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ): lowerCAmelCase__ : Dict = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCAmelCase__ : int = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCAmelCase__ : Dict = False lowerCAmelCase__ : Dict = False lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : Union[str, Any] = False def __a ( 
self : Dict ): '''simple docstring''' a__ = TFRegNetModelTester(self ) a__ = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase ) def __a ( self : Union[str, Any] ): '''simple docstring''' return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def __a ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def __a ( self : int ): '''simple docstring''' pass def __a ( self : Union[str, Any] ): '''simple docstring''' a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ = model_class(lowerCamelCase ) a__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ = [*signature.parameters.keys()] a__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def __a ( self : Optional[Any] ): '''simple docstring''' a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def __a ( self : Dict ): '''simple docstring''' def check_hidden_states_output(lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : List[Any] ): a__ = model_class(lowerCamelCase ) a__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase ) a__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a__ = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() a__ = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: a__ = layer_type a__ = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __a ( self : List[Any] ): '''simple docstring''' a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Any={} ): a__ = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase ) a__ = model(lowerCamelCase , return_dict=lowerCamelCase , **lowerCamelCase ).to_tuple() def recursive_check(lowerCamelCase : str , lowerCamelCase : Dict ): if isinstance(lowerCamelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase , lowerCamelCase ): recursive_check(lowerCamelCase , lowerCamelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCamelCase , lowerCamelCase ) ) , msg=( "Tuple and dict output are not equal. 
Difference:" F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowerCamelCase , lowerCamelCase ) for model_class in self.all_model_classes: a__ = model_class(lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase ) check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) a__ = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase ) check_equivalence(lowerCamelCase , lowerCamelCase , lowerCamelCase , {"output_hidden_states": True} ) def __a ( self : Optional[int] ): '''simple docstring''' a__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ = TFRegNetModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def _lowerCamelCase () -> List[Any]: a__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Dict ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __a ( self : Tuple ): '''simple docstring''' a__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) a__ = self.default_image_processor a__ = prepare_img() a__ = image_processor(images=lowerCamelCase , return_tensors="tf" ) # forward pass a__ = model(**lowerCamelCase , training=lowerCamelCase ) # verify the logits a__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) a__ = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 )
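The test module above exercises TFRegNet end to end. A minimal standalone inference sketch follows; the checkpoint name is an assumption (any RegNet checkpoint with TF weights works the same way).

import numpy as np
from transformers import AutoImageProcessor, TFRegNetModel

# Checkpoint name is illustrative; substitute any RegNet checkpoint with TF weights.
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")

image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)  # stand-in for a real image
inputs = processor(images=image, return_tensors="tf")
outputs = model(**inputs)
# Feature maps are channel-first: (batch, channels, H // 32, W // 32)
print(outputs.last_hidden_state.shape)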
489
1
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class __lowercase: """simple docstring""" UpperCamelCase_ = MBartConfig UpperCamelCase_ = {} UpperCamelCase_ = '''gelu''' def __init__( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any]=13 , _lowerCAmelCase : Tuple=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : List[str]=99 , _lowerCAmelCase : int=32 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : Dict=37 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[str]=20 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : int=0 , ) -> Tuple: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = eos_token_id _lowerCAmelCase = pad_token_id _lowerCAmelCase = bos_token_id def SCREAMING_SNAKE_CASE_ ( self : int ) -> Union[str, Any]: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _lowerCAmelCase = prepare_mbart_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: _lowerCAmelCase = TFMBartModel(config=_lowerCAmelCase ).get_decoder() _lowerCAmelCase = inputs_dict['input_ids'] _lowerCAmelCase = input_ids[:1, :] _lowerCAmelCase = inputs_dict['attention_mask'][:1, :] _lowerCAmelCase = inputs_dict['head_mask'] _lowerCAmelCase = 1 # first forward pass _lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , head_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase ) _lowerCAmelCase , _lowerCAmelCase = 
outputs.to_tuple() _lowerCAmelCase = past_key_values[1] def _a ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , ): """simple docstring""" if attention_mask is None: _lowerCAmelCase = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowerCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowercase( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCamelCase_ = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCamelCase_ = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase_ = True UpperCamelCase_ = False UpperCamelCase_ = False def SCREAMING_SNAKE_CASE_ ( self : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Dict ) -> Tuple: if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]: _lowerCAmelCase = TFMBartModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : str ) -> str: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_lowerCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class __lowercase( unittest.TestCase ): """simple docstring""" UpperCamelCase_ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCamelCase_ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCamelCase_ = '''facebook/mbart-large-en-ro''' @cached_property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]: return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str: _lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def SCREAMING_SNAKE_CASE_ ( self : Dict , **_lowerCAmelCase : Union[str, Any] ) -> Any: _lowerCAmelCase = self.translate_src_text(**_lowerCAmelCase ) self.assertListEqual(self.expected_text , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any , **_lowerCAmelCase : List[str] ) -> List[str]: _lowerCAmelCase = self.tokenizer(self.src_text , **_lowerCAmelCase , return_tensors='tf' ) _lowerCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _lowerCAmelCase = self.tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) return generated_words @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict: self._assert_generated_batch_equal_expected()
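For reference, a minimal sketch of the en->ro translation flow that the integration test above asserts; it assumes tensorflow, sentencepiece, and network access to the facebook/mbart-large-en-ro checkpoint.

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")

batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
generated = model.generate(batch["input_ids"], attention_mask=batch["attention_mask"], num_beams=2)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))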
704
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """Numpy wrapper around make_atom14_masks."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
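The core trick above is fancy indexing with a per-residue-type lookup table: indexing a (num_restypes, num_slots) table with an aatype vector yields per-residue index maps in one shot. A toy illustration:

import torch

lookup = torch.tensor([[0, 2, 1], [1, 0, 2]])  # 2 residue types, 3 atom slots each
aatype = torch.tensor([0, 1, 1, 0])            # 4 residues
per_residue = lookup[aatype]                   # shape (4, 3): one index row per residue
print(per_residue)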
585
0
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by concatenating turns with EOS separators."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
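A minimal usage sketch for the tokenizer above; it assumes network access to the EleutherAI/gpt-neox-20b tokenizer files.

from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(ids, "->", tokenizer.decode(ids))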
439
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCamelCase ( unittest.TestCase ): def UpperCamelCase ( self : Optional[int] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = (3_2, 3_2) SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ ) return image @property def UpperCamelCase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) return model @property def UpperCamelCase ( self : str ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def UpperCamelCase ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , ) return RobertaSeriesModelWithTransformation(snake_case__ ) @property def UpperCamelCase ( self : Dict ): """simple docstring""" def extract(*snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ): class UpperCamelCase : def __init__( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE = torch.ones([0] ) def UpperCamelCase ( self : Any , snake_case__ : List[str] ): """simple docstring""" self.pixel_values.to(snake_case__ ) return self return Out() return extract def UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE = self.dummy_cond_unet SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ ) SCREAMING_SNAKE_CASE = self.dummy_vae SCREAMING_SNAKE_CASE = self.dummy_text_encoder SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) SCREAMING_SNAKE_CASE = 7_7 SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ ) SCREAMING_SNAKE_CASE = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline( unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , ) SCREAMING_SNAKE_CASE = 
VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ ) SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ ) alt_pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger' SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 ) SCREAMING_SNAKE_CASE = alt_pipe( [prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , ) SCREAMING_SNAKE_CASE = output.images SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(0 ) SCREAMING_SNAKE_CASE = alt_pipe( [prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=snake_case__ , return_dict=snake_case__ , )[0] SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) SCREAMING_SNAKE_CASE = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE = self.dummy_cond_unet SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ ) SCREAMING_SNAKE_CASE = self.dummy_vae SCREAMING_SNAKE_CASE = self.dummy_text_encoder SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' ) SCREAMING_SNAKE_CASE = 7_7 SCREAMING_SNAKE_CASE = self.dummy_image.to(snake_case__ ) # put models in fp16 SCREAMING_SNAKE_CASE = unet.half() SCREAMING_SNAKE_CASE = vae.half() SCREAMING_SNAKE_CASE = bert.half() # make sure here that pndm scheduler skips prk SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline( unet=snake_case__ , scheduler=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , safety_checker=snake_case__ , feature_extractor=self.dummy_extractor , ) SCREAMING_SNAKE_CASE = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=snake_case__ ) SCREAMING_SNAKE_CASE = alt_pipe.to(snake_case__ ) alt_pipe.set_progress_bar_config(disable=snake_case__ ) SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger' SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = alt_pipe( [prompt] , generator=snake_case__ , num_inference_steps=2 , output_type='np' , image=snake_case__ , ).images assert image.shape == (1, 3_2, 3_2, 3) @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE = init_image.resize((7_6_0, 5_0_4) ) SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion' SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained( snake_case__ , safety_checker=snake_case__ , ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation' SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe( prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , 
generator=snake_case__ , output_type='np' , ) SCREAMING_SNAKE_CASE = output.images[0] SCREAMING_SNAKE_CASE = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 7_6_0, 3) SCREAMING_SNAKE_CASE = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def UpperCamelCase ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) SCREAMING_SNAKE_CASE = init_image.resize((7_6_8, 5_1_2) ) SCREAMING_SNAKE_CASE = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' ) SCREAMING_SNAKE_CASE = 'BAAI/AltDiffusion' SCREAMING_SNAKE_CASE = AltDiffusionImgaImgPipeline.from_pretrained( snake_case__ , safety_checker=snake_case__ , ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE = 'A fantasy landscape, trending on artstation' SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE = pipe( prompt=snake_case__ , image=snake_case__ , strength=0.75 , guidance_scale=7.5 , generator=snake_case__ , output_type='np' , ) SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (5_1_2, 7_6_8, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
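A condensed sketch of the image-to-image flow the slow tests above cover; it assumes a CUDA device and network access to the BAAI/AltDiffusion checkpoint.

import torch
from diffusers import AltDiffusionImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None).to("cuda")
generator = torch.manual_seed(0)
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("fantasy_landscape.png")  # output path is illustrative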
439
1
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Tuple = (DEISMultistepScheduler,) __UpperCAmelCase : List[str] = (("num_inference_steps", 25),) def _UpperCamelCase ( self , **a_ ): lowerCamelCase_ : Union[str, Any] = { "num_train_timesteps": 1000, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, } config.update(**a_ ) return config def _UpperCamelCase ( self , a_=0 , **a_ ): lowerCamelCase_ : Any = dict(self.forward_default_kwargs ) lowerCamelCase_ : Any = kwargs.pop("num_inference_steps" , a_ ) lowerCamelCase_ : Dict = self.dummy_sample lowerCamelCase_ : str = 0.1 * sample lowerCamelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase_ : List[Any] = self.get_scheduler_config(**a_ ) lowerCamelCase_ : List[Any] = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals lowerCamelCase_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) lowerCamelCase_ : Tuple = scheduler_class.from_pretrained(a_ ) new_scheduler.set_timesteps(a_ ) # copy over dummy past residuals lowerCamelCase_ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase_ ,lowerCamelCase_ : Optional[Any] = sample, sample for t in range(a_ , time_step + scheduler.config.solver_order + 1 ): lowerCamelCase_ : List[str] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample lowerCamelCase_ : Any = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self , a_=0 , **a_ ): lowerCamelCase_ : int = dict(self.forward_default_kwargs ) lowerCamelCase_ : Tuple = kwargs.pop("num_inference_steps" , a_ ) lowerCamelCase_ : Union[str, Any] = self.dummy_sample lowerCamelCase_ : Tuple = 0.1 * sample lowerCamelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Tuple = self.get_scheduler_config() lowerCamelCase_ : List[Any] = scheduler_class(**a_ ) scheduler.set_timesteps(a_ ) # copy over dummy past residuals (must be after setting timesteps) lowerCamelCase_ : Tuple = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a_ ) lowerCamelCase_ : Optional[Any] = scheduler_class.from_pretrained(a_ ) # copy over dummy past residuals new_scheduler.set_timesteps(a_ ) # copy over dummy past residual (must be after setting timesteps) lowerCamelCase_ : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase_ : Optional[Any] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample lowerCamelCase_ : List[Any] = new_scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _UpperCamelCase ( self , a_=None , **a_ ): if scheduler is None: lowerCamelCase_ : Dict = self.scheduler_classes[0] lowerCamelCase_ : str = self.get_scheduler_config(**a_ ) lowerCamelCase_ : Union[str, Any] = scheduler_class(**a_ ) lowerCamelCase_ : List[str] = 
self.scheduler_classes[0] lowerCamelCase_ : List[Any] = self.get_scheduler_config(**a_ ) lowerCamelCase_ : List[Any] = scheduler_class(**a_ ) lowerCamelCase_ : Optional[int] = 10 lowerCamelCase_ : Union[str, Any] = self.dummy_model() lowerCamelCase_ : Any = self.dummy_sample_deter scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase_ : Optional[Any] = model(a_ , a_ ) lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ ).prev_sample return sample def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = dict(self.forward_default_kwargs ) lowerCamelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , a_ ) for scheduler_class in self.scheduler_classes: lowerCamelCase_ : Any = self.get_scheduler_config() lowerCamelCase_ : Any = scheduler_class(**a_ ) lowerCamelCase_ : str = self.dummy_sample lowerCamelCase_ : int = 0.1 * sample if num_inference_steps is not None and hasattr(a_ , "set_timesteps" ): scheduler.set_timesteps(a_ ) elif num_inference_steps is not None and not hasattr(a_ , "set_timesteps" ): lowerCamelCase_ : Optional[Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCamelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] lowerCamelCase_ : Dict = dummy_past_residuals[: scheduler.config.solver_order] lowerCamelCase_ : Optional[Any] = scheduler.timesteps[5] lowerCamelCase_ : str = scheduler.timesteps[6] lowerCamelCase_ : List[Any] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample lowerCamelCase_ : Optional[int] = scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCamelCase_ : int = self.full_loop(scheduler=a_ ) lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 lowerCamelCase_ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCamelCase_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCamelCase_ : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCamelCase_ : List[str] = DEISMultistepScheduler.from_config(scheduler.config ) lowerCamelCase_ : Tuple = self.full_loop(scheduler=a_ ) lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 def _UpperCamelCase ( self ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=a_ ) def _UpperCamelCase ( self ): self.check_over_configs(thresholding=a_ ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=a_ , prediction_type=a_ , sample_max_value=a_ , algorithm_type="deis" , solver_order=a_ , solver_type=a_ , ) def _UpperCamelCase ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a_ ) def _UpperCamelCase ( self ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , ) lowerCamelCase_ : Tuple = self.full_loop( solver_order=a_ , solver_type=a_ , prediction_type=a_ , algorithm_type=a_ , ) assert not torch.isnan(a_ ).any(), "Samples 
have nan numbers" def _UpperCamelCase ( self ): self.check_over_configs(lower_order_final=a_ ) self.check_over_configs(lower_order_final=a_ ) def _UpperCamelCase ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=a_ , time_step=0 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.full_loop() lowerCamelCase_ : str = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 0.2_39_16 ) < 1E-3 def _UpperCamelCase ( self ): lowerCamelCase_ : str = self.full_loop(prediction_type="v_prediction" ) lowerCamelCase_ : str = torch.mean(torch.abs(a_ ) ) assert abs(result_mean.item() - 0.0_91 ) < 1E-3 def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0] lowerCamelCase_ : Optional[int] = self.get_scheduler_config(thresholding=a_ , dynamic_thresholding_ratio=0 ) lowerCamelCase_ : Dict = scheduler_class(**a_ ) lowerCamelCase_ : List[str] = 10 lowerCamelCase_ : int = self.dummy_model() lowerCamelCase_ : Optional[Any] = self.dummy_sample_deter.half() scheduler.set_timesteps(a_ ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase_ : int = model(a_ , a_ ) lowerCamelCase_ : Union[str, Any] = scheduler.step(a_ , a_ , a_ ).prev_sample assert sample.dtype == torch.floataa
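The test above leans on the fact that DEIS, DPM-Solver, and UniPC schedulers share a config format, so each can be rebuilt from another's config. A short sketch of that round trip:

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    UniPCMultistepScheduler,
)

deis = DEISMultistepScheduler(num_train_timesteps=1000, beta_schedule="linear")
dpm = DPMSolverMultistepScheduler.from_config(deis.config)
unipc = UniPCMultistepScheduler.from_config(dpm.config)
roundtrip = DEISMultistepScheduler.from_config(unipc.config)
print(type(roundtrip).__name__)  # DEISMultistepScheduler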
706
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    """Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
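A usage sketch for the processor above; the checkpoint name and the synthetic audio are illustrative.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # illustrative checkpoint
audio = np.random.randn(48_000).astype(np.float32)  # one second of 48 kHz noise as a stand-in
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
print(list(inputs.keys()))  # tokenizer fields plus 'input_features'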
73
0
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __lowerCamelCase = logging.getLogger() def a ( ): '''simple docstring''' UpperCAmelCase_ :Any = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase_ :Dict = parser.parse_args() return args.f class _snake_case ( A__ ): '''simple docstring''' def snake_case_ ( self : List[str] ): UpperCAmelCase_ :int = logging.StreamHandler(sys.stdout ) logger.addHandler(snake_case ) def snake_case_ ( self : Tuple , snake_case : Dict ): UpperCAmelCase_ :int = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(snake_case , '''argv''' , snake_case ): UpperCAmelCase_ :List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(snake_case , 0.666 ) @slow @require_torch_non_multi_gpu def snake_case_ ( self : Optional[Any] ): UpperCAmelCase_ :Optional[int] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(snake_case ) UpperCAmelCase_ :Tuple = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(snake_case ) UpperCAmelCase_ :str = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(snake_case )
608
"""simple docstring""" import socket def a ( ): '''simple docstring''' UpperCAmelCase_ :Union[str, Any] = socket.socket(socket.AF_INET, socket.SOCK_STREAM ) UpperCAmelCase_ :int = socket.gethostname() UpperCAmelCase_ :List[Any] = 12312 sock.connect((host, port) ) sock.send(b'''Hello server!''' ) with open('''Received_file''', '''wb''' ) as out_file: print('''File opened''' ) print('''Receiving data...''' ) while True: UpperCAmelCase_ :int = sock.recv(1024 ) if not data: break out_file.write(__snake_case ) print('''Successfully received the file''' ) sock.close() print('''Connection closed''' ) if __name__ == "__main__": main()
608
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __lowerCamelCase : str = logging.get_logger(__name__) def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" if isinstance(lowerCAmelCase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase_ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase_ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class UpperCAmelCase ( _lowercase ): UpperCAmelCase : int = ['''pixel_values'''] def __init__(self : Dict , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_5_5 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , **A__ : Any , ) -> None: super().__init__(**A__ ) lowercase = size if size is not None else {"shortest_edge": 2_2_4} lowercase = get_size_dict(A__ , default_to_square=A__ ) lowercase = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} lowercase = get_size_dict(A__ , param_name="crop_size" ) lowercase = do_resize lowercase = size lowercase = do_center_crop lowercase = crop_size lowercase = resample lowercase = do_rescale lowercase = rescale_factor lowercase = do_normalize lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ (self : Optional[Any] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BILINEAR , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Optional[int] , ) -> np.ndarray: lowercase = get_size_dict(A__ , default_to_square=A__ ) if "shortest_edge" in size: lowercase = get_resize_output_image_size(A__ , size["shortest_edge"] , default_to_square=A__ ) elif "height" in size and "width" in size: lowercase = (size["height"], size["width"]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ ) def UpperCAmelCase__ (self : List[str] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : str , ) -> np.ndarray: lowercase = get_size_dict(A__ ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}' ) return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ ) def UpperCAmelCase__ (self : List[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Tuple , ) -> Optional[int]: return rescale(A__ , scale=A__ , data_format=A__ , **A__ ) def UpperCAmelCase__ (self : List[str] , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray: return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ ) def UpperCAmelCase__ (self : Any , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowercase = to_numpy_array(A__ ) if do_resize: lowercase = self.resize(image=A__ , size=A__ , resample=A__ ) if do_center_crop: lowercase = self.center_crop(A__ , size=A__ ) if do_rescale: lowercase = self.rescale(image=A__ , scale=A__ ) if do_normalize: lowercase = self.normalize(image=A__ , mean=A__ , std=A__ ) lowercase = to_channel_dimension_format(A__ , A__ ) return image def UpperCAmelCase__ (self : Dict , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : Dict[str, int] = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[str, TensorType]] = None , A__ : ChannelDimension = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image: lowercase = do_resize if do_resize is not None else self.do_resize lowercase = resample if resample is not None else self.resample lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase = do_rescale if do_rescale is not None else self.do_rescale lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase = do_normalize if do_normalize is not None else self.do_normalize lowercase = image_mean if image_mean is not None else self.image_mean lowercase = image_std if image_std is not None else self.image_std lowercase = size if size is not None else self.size lowercase = get_size_dict(A__ , default_to_square=A__ ) lowercase = crop_size if crop_size is not None else self.crop_size lowercase = get_size_dict(A__ , param_name="crop_size" ) if not valid_images(A__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) lowercase = make_batched(A__ ) lowercase = [ [ self._preprocess_image( image=A__ , do_resize=A__ , size=A__ , resample=A__ , do_center_crop=A__ , crop_size=A__ , do_rescale=A__ , rescale_factor=A__ , do_normalize=A__ , image_mean=A__ , image_std=A__ , data_format=A__ , ) for img in video ] for video in videos ] lowercase = {"pixel_values": videos} return BatchFeature(data=A__ , tensor_type=A__ )
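A usage sketch for the video image processor above. The class name is hypothetical (the definition above is obfuscated); with the default config, frames are resized to a 224-pixel shortest edge and center-cropped to 224x224.

import numpy as np

processor = VideoImageProcessor()  # hypothetical readable name for the class defined above
video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 random frames
batch = processor(video, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 8, 3, 224, 224): (videos, frames, C, H, W)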
719
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : List[Any] = "https://openaipublic.azureedge.net/jukebox/models/" __lowerCamelCase : Tuple = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def UpperCAmelCase_ ( lowerCAmelCase_ ): """simple docstring""" if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: lowercase = key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: lowercase = key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: lowercase = key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: lowercase = key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: lowercase = key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: lowercase = key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowercase = key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." 
in key: lowercase = key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" lowercase = {} import re lowercase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) lowercase = re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) lowercase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) lowercase = re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) lowercase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) lowercase = re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) lowercase = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(lowerCAmelCase_ ): lowercase = re_encoder_block_conv_in.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' lowercase = re_encoder_block_conv_in.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_encoder_block_resnet.fullmatch(lowerCAmelCase_ ): lowercase = re_encoder_block_resnet.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) lowercase = {"1": 1, "3": 2}[groups[-2]] lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' 
lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' lowercase = prefix + resnet_block lowercase = re_encoder_block_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_encoder_block_proj_out.fullmatch(lowerCAmelCase_ ): lowercase = re_encoder_block_proj_out.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' lowercase = re_encoder_block_proj_out.sub(lowerCAmelCase_ , lowerCAmelCase_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(lowerCAmelCase_ ): lowercase = re_decoder_block_conv_out.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' lowercase = re_decoder_block_conv_out.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_decoder_block_resnet.fullmatch(lowerCAmelCase_ ): lowercase = re_decoder_block_resnet.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2 lowercase = {"1": 1, "3": 2}[groups[-2]] lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' lowercase = prefix + resnet_block lowercase = re_decoder_block_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_decoder_block_proj_in.fullmatch(lowerCAmelCase_ ): lowercase = re_decoder_block_proj_in.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' lowercase = re_decoder_block_proj_in.sub(lowerCAmelCase_ , lowerCAmelCase_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(lowerCAmelCase_ ): lowercase = re_prior_cond_conv_out.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' lowercase = re_prior_cond_conv_out.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_prior_cond_resnet.fullmatch(lowerCAmelCase_ ): lowercase = re_prior_cond_resnet.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2 lowercase = {"1": 1, "3": 2}[groups[-2]] lowercase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
lowercase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' lowercase = prefix + resnet_block lowercase = re_prior_cond_resnet.sub(lowerCAmelCase_ , lowerCAmelCase_ ) elif re_prior_cond_proj_in.fullmatch(lowerCAmelCase_ ): lowercase = re_prior_cond_proj_in.match(lowerCAmelCase_ ) lowercase = regex_match.groups() lowercase = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' lowercase = re_prior_cond_proj_in.sub(lowerCAmelCase_ , lowerCAmelCase_ ) # keep original key else: lowercase = original_key lowercase = replace_key(lowerCAmelCase_ ) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: lowercase = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) lowercase = original_key lowercase = original_key lowercase = value return new_dict @torch.no_grad() def UpperCAmelCase_ ( lowerCAmelCase_=None , lowerCAmelCase_=None ): """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): lowercase = requests.get(f'{PREFIX}{file}' , allow_redirects=lowerCAmelCase_ ) os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=lowerCAmelCase_ ) open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , "wb" ).write(r.content ) lowercase = MODEL_MAPPING[model_name.split("/" )[-1]] lowercase = JukeboxConfig.from_pretrained(lowerCAmelCase_ ) lowercase = JukeboxModel(lowerCAmelCase_ ) lowercase = [] lowercase = {} for i, dict_name in enumerate(lowerCAmelCase_ ): lowercase = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["model"] lowercase = {} for k in old_dic.keys(): if k.endswith(".b" ): lowercase = old_dic[k] elif k.endswith(".w" ): lowercase = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowercase = old_dic[k] else: lowercase = old_dic[k] lowercase = "vqvae" if i == 0 else f'priors.{3 - i}' lowercase = fix_jukebox_keys(lowerCAmelCase_ , model.state_dict() , lowerCAmelCase_ , lowerCAmelCase_ ) weight_dict.append(lowerCAmelCase_ ) lowercase = weight_dict.pop(0 ) model.vqvae.load_state_dict(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) with open(f'{pytorch_dump_folder_path}/mapping.json' , "w" ) as txtfile: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase_ ) return weight_dict if __name__ == "__main__": __lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jukebox-5b-lyrics", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="jukebox-5b-lyrics-converted", type=str, help="Path to the output PyTorch model directory.", ) __lowerCamelCase : List[str] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
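To make the regex-driven renaming above concrete, here is a toy run of the encoder conv-in rule on a single checkpoint key:

import re

pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
key = "encoders.0.level_blocks.1.model.2.3.weight"
groups = pattern.fullmatch(key).groups()
block_index = int(groups[2]) * 2 + int(groups[3])
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
print(new_key)  # encoders.0.level_blocks.1.downsample_block.7.weight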
459
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
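A quick sketch showing the attribute_map above in action: the canonical config names resolve to the GPT-2-style fields.

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 256 4 4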
193
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list; the top of the stack is the list head."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
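A quick usage check for the linked-list stack above (names follow the restored definitions):

stack: LinkedStack[int] = LinkedStack()
for value in (1, 2, 3):
    stack.push(value)
print(stack)         # 3->2->1
print(stack.pop())   # 3
print(stack.peek())  # 2
print(len(stack))    # 2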
193
1
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: """simple docstring""" assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: """simple docstring""" snake_case: int =tmp_path / 'cache' snake_case: Tuple ={'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case: List[Any] =TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: """simple docstring""" snake_case: Optional[int] =tmp_path / 'cache' snake_case: Optional[Any] ={'text': 'string'} snake_case: List[Any] =features.copy() if features else default_expected_features snake_case: List[str] =( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case: int =TextDatasetReader(__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: """simple docstring""" snake_case: Union[str, Any] =tmp_path / 'cache' snake_case: List[str] ={'text': 'string'} snake_case: Any =TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , split=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: """simple docstring""" if issubclass(__UpperCAmelCase , __UpperCAmelCase ): snake_case: Tuple =text_path elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): snake_case: int =[text_path] snake_case: Dict =tmp_path / 'cache' snake_case: List[Any] ={'text': 'string'} snake_case: Optional[Any] =TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ) -> Optional[int]: """simple docstring""" assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) for split in splits: snake_case: Optional[Any] =dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: """simple docstring""" snake_case: Tuple =tmp_path / 'cache' 
snake_case: Optional[Any] ={'text': 'string'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case: List[Any] =TextDatasetReader({'train': text_path} , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize( 'features' , [ None, {'text': 'string'}, {'text': 'int32'}, {'text': 'float32'}, ] , ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: """simple docstring""" snake_case: Optional[int] =tmp_path / 'cache' # the text reader's default dtype for the "text" column is "string" snake_case: str ={'text': 'string'} snake_case: str =features.copy() if features else default_expected_features snake_case: int =( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case: Optional[Any] =TextDatasetReader({'train': text_path} , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def a_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: """simple docstring""" if split: snake_case: List[Any] ={split: text_path} else: snake_case: Any ='train' snake_case: Optional[Any] ={'train': text_path, 'test': text_path} snake_case: Optional[Any] =tmp_path / 'cache' snake_case: Any ={'text': 'string'} snake_case: Optional[int] =TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
713
'''simple docstring''' def a_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: """simple docstring""" if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) snake_case: Optional[Any] =str(bin(__UpperCAmelCase ) )[2:] # remove the leading "0b" snake_case: Union[str, Any] =str(bin(__UpperCAmelCase ) )[2:] snake_case: List[Any] =max(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) return "0b" + "".join( str(int('1' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(__UpperCAmelCase ) , b_binary.zfill(__UpperCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
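# Extra worked example for the bitwise-OR helper above (values are
# illustrative, not from the original doctests): 25 = 0b011001 and
# 32 = 0b100000, so the per-character OR of the zfill-padded strings
# is 0b111001.
# >>> a_(25, 32)
# '0b111001'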
347
0
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowercase = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } lowercase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""") def lowerCamelCase_ ( UpperCamelCase__ : List[Any], UpperCamelCase__ : Any=False ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = create_model( '''HTSAT-tiny''', '''roberta''', UpperCamelCase__, precision='''fp32''', device='''cuda:0''' if torch.cuda.is_available() else '''cpu''', enable_fusion=UpperCamelCase__, fusion_type='''aff_2d''' if enable_fusion else None, ) return model, model_cfg def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ): '''simple docstring''' UpperCamelCase__ = {} UpperCamelCase__ = r'''.*sequential.(\d+).*''' UpperCamelCase__ = r'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCamelCase__ = key.replace(UpperCamelCase__, UpperCamelCase__ ) if re.match(UpperCamelCase__, UpperCamelCase__ ): # replace sequential layers with list UpperCamelCase__ = re.match(UpperCamelCase__, UpperCamelCase__ ).group(1 ) UpperCamelCase__ = key.replace(F"""sequential.{sequential_layer}.""", F"""layers.{int(UpperCamelCase__ )//3}.linear.""" ) elif re.match(UpperCamelCase__, UpperCamelCase__ ): UpperCamelCase__ = int(re.match(UpperCamelCase__, UpperCamelCase__ ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
UpperCamelCase__ = 1 if projecton_layer == 0 else 2 UpperCamelCase__ = key.replace(F"""_projection.{projecton_layer}.""", F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" in key and "qkv" in key: # split qkv into query, key and value UpperCamelCase__ = value UpperCamelCase__ = mixed_qkv.size(0 ) // 3 UpperCamelCase__ = mixed_qkv[:qkv_dim] UpperCamelCase__ = mixed_qkv[qkv_dim : qkv_dim * 2] UpperCamelCase__ = mixed_qkv[qkv_dim * 2 :] UpperCamelCase__ = query_layer UpperCamelCase__ = key_layer UpperCamelCase__ = value_layer else: UpperCamelCase__ = value return model_state_dict def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int=False ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = init_clap(UpperCamelCase__, enable_fusion=UpperCamelCase__ ) clap_model.eval() UpperCamelCase__ = clap_model.state_dict() UpperCamelCase__ = rename_state_dict(UpperCamelCase__ ) UpperCamelCase__ = ClapConfig() UpperCamelCase__ = enable_fusion UpperCamelCase__ = ClapModel(UpperCamelCase__ ) # ignore the spectrogram embedding layer model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) transformers_config.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": lowercase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the original CLAP checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""") lowercase = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
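# Example invocation of the CLAP conversion script above (illustrative file
# and folder names; the flags mirror the argparse setup):
#   python convert_clap_checkpoint.py --checkpoint_path ./clap_htsat_tiny.pt \
#       --pytorch_dump_folder_path ./clap-hf --enable_fusion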
240
import random def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : List[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = [], [], [] for element in data: if element < pivot: less.append(UpperCamelCase__ ) elif element > pivot: greater.append(UpperCamelCase__ ) else: equal.append(UpperCamelCase__ ) return less, equal, greater def lowerCamelCase_ ( UpperCamelCase__ : list, UpperCamelCase__ : int ): '''simple docstring''' if index >= len(UpperCamelCase__ ) or index < 0: return None UpperCamelCase__ = items[random.randint(0, len(UpperCamelCase__ ) - 1 )] UpperCamelCase__ = 0 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = _partition(UpperCamelCase__, UpperCamelCase__ ) UpperCamelCase__ = len(UpperCamelCase__ ) UpperCamelCase__ = len(UpperCamelCase__ ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(UpperCamelCase__, UpperCamelCase__ ) # must be in larger else: return quick_select(UpperCamelCase__, index - (m + count) )
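# Illustrative check of the order-statistic selector above (this assumes the
# first helper keeps the `_partition` name it is called by; the obfuscation
# collapsed both defs to `lowerCamelCase_`):
# >>> lowerCamelCase_([2, 4, 5, 7, 899, 54, 32], 5)  # 5th smallest, 0-indexed
# 54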
240
1
import os import sys import unittest _UpperCAmelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCAmelCase : Union[str, Any] = os.path.join(git_repo_path, """src""", """diffusers""") class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : Union[str, Any] ) -> int: lowerCamelCase__ : int = find_backend(' if not is_torch_available():' ) self.assertEqual(UpperCAmelCase , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") lowerCamelCase__ : List[str] = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(UpperCAmelCase , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") lowerCamelCase__ : Union[str, Any] = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(UpperCAmelCase , 'torch_and_transformers_and_onnx' ) def A_ ( self : str ) -> int: lowerCamelCase__ : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , UpperCAmelCase ) self.assertIn('torch_and_transformers' , UpperCAmelCase ) self.assertIn('flax_and_transformers' , UpperCAmelCase ) self.assertIn('torch_and_transformers_and_onnx' , UpperCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: lowerCamelCase__ : Optional[Any] = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(UpperCAmelCase , '\nCONSTANT = None\n' ) lowerCamelCase__ : List[Any] = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( UpperCAmelCase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) lowerCamelCase__ : Any = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n' lowerCamelCase__ : Any = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(UpperCAmelCase , UpperCAmelCase ) def A_ ( self : Any ) -> List[str]: lowerCamelCase__ : Union[str, Any] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n 
_backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n' lowerCamelCase__ : List[str] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , UpperCAmelCase )
188
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants _UpperCAmelCase : int = Mapping[str, np.ndarray] _UpperCAmelCase : List[Any] = Mapping[str, Any] # Is a nested dict. _UpperCAmelCase : Dict = 0.01 @dataclasses.dataclass(frozen=__UpperCamelCase ) class lowerCAmelCase : UpperCAmelCase__ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. UpperCAmelCase__ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. UpperCAmelCase__ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. UpperCAmelCase__ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. UpperCAmelCase__ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions UpperCAmelCase__ = None # Optional remark about the protein. Included as a comment in output PDB # files UpperCAmelCase__ = None # Templates used to generate this protein (prediction-only) UpperCAmelCase__ = None # Chain corresponding to each parent UpperCAmelCase__ = None def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Protein: lowerCamelCase__ : Optional[int] = r'(\[[A-Z]+\]\n)' lowerCamelCase__ : List[str] = [tag.strip() for tag in re.split(_UpperCAmelCase , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0] lowerCamelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] ) lowerCamelCase__ : List[str] = ["N", "CA", "C"] lowerCamelCase__ : Dict = None lowerCamelCase__ : str = None lowerCamelCase__ : int = None for g in groups: if "[PRIMARY]" == g[0]: lowerCamelCase__ : int = g[1][0].strip() for i in range(len(_UpperCAmelCase ) ): if seq[i] not in residue_constants.restypes: lowerCamelCase__ : Union[str, Any] = 'X' # FIXME: strings are immutable lowerCamelCase__ : Union[str, Any] = np.array( [residue_constants.restype_order.get(_UpperCAmelCase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowerCamelCase__ : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(_UpperCAmelCase , g[1][axis].split() ) ) ) lowerCamelCase__ : int = np.array(_UpperCAmelCase ) lowerCamelCase__ : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(_UpperCAmelCase ): lowerCamelCase__ : int = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowerCamelCase__ : int = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) ) lowerCamelCase__ : List[Any] = np.zeros( ( len(_UpperCAmelCase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(_UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=_UpperCAmelCase , atom_mask=_UpperCAmelCase , aatype=_UpperCAmelCase , residue_index=np.arange(len(_UpperCAmelCase ) ) , b_factors=_UpperCAmelCase , ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = 0 ) -> List[str]: lowerCamelCase__ : List[str] = [] lowerCamelCase__ : Dict = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) 
lowerCamelCase__ : str = prot.parents lowerCamelCase__ : Union[str, Any] = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowerCamelCase__ : Any = [p for i, p in zip(_UpperCAmelCase , _UpperCAmelCase ) if i == chain_id] if parents is None or len(_UpperCAmelCase ) == 0: lowerCamelCase__ : List[Any] = ['N/A'] pdb_headers.append(F"""PARENT {" ".join(_UpperCAmelCase )}""" ) return pdb_headers def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> str: lowerCamelCase__ : List[str] = [] lowerCamelCase__ : str = pdb_str.split('\n' ) lowerCamelCase__ : int = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) lowerCamelCase__ : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: lowerCamelCase__ : List[Any] = [] if prot.parents_chain_index is not None: lowerCamelCase__ : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(_UpperCAmelCase ) , [] ) parent_dict[str(_UpperCAmelCase )].append(_UpperCAmelCase ) lowerCamelCase__ : str = max([int(_UpperCAmelCase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowerCamelCase__ : Optional[Any] = parent_dict.get(str(_UpperCAmelCase ) , ['N/A'] ) parents_per_chain.append(_UpperCAmelCase ) else: parents_per_chain.append(list(prot.parents ) ) else: lowerCamelCase__ : Union[str, Any] = [['N/A']] def make_parent_line(_UpperCAmelCase ) -> str: return F"""PARENT {" ".join(_UpperCAmelCase )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowerCamelCase__ : List[Any] = 0 for i, l in enumerate(_UpperCAmelCase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(_UpperCAmelCase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(_UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = parents_per_chain[chain_counter] else: lowerCamelCase__ : Optional[Any] = ['N/A'] out_pdb_lines.append(make_parent_line(_UpperCAmelCase ) ) return "\n".join(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str: lowerCamelCase__ : Tuple = residue_constants.restypes + ['X'] def res_atoa(_UpperCAmelCase ) -> str: return residue_constants.restype_atoa.get(restypes[r] , 'UNK' ) lowerCamelCase__ : int = residue_constants.atom_types lowerCamelCase__ : List[str] = [] lowerCamelCase__ : Union[str, Any] = prot.atom_mask lowerCamelCase__ : Union[str, Any] = prot.aatype lowerCamelCase__ : int = prot.atom_positions lowerCamelCase__ : List[Any] = prot.residue_index.astype(np.intaa ) lowerCamelCase__ : Optional[int] = prot.b_factors lowerCamelCase__ : Any = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('Invalid aatypes.' ) lowerCamelCase__ : List[Any] = get_pdb_headers(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: pdb_lines.extend(_UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = aatype.shape[0] lowerCamelCase__ : Optional[Any] = 1 lowerCamelCase__ : str = 0 lowerCamelCase__ : Tuple = string.ascii_uppercase lowerCamelCase__ : str = None # Add all atom sites. 
for i in range(_UpperCAmelCase ): lowerCamelCase__ : List[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(_UpperCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowerCamelCase__ : Union[str, Any] = 'ATOM' lowerCamelCase__ : Optional[int] = atom_name if len(_UpperCAmelCase ) == 4 else F""" {atom_name}""" lowerCamelCase__ : Any = '' lowerCamelCase__ : Optional[Any] = '' lowerCamelCase__ : str = 1.00 lowerCamelCase__ : str = atom_name[0] # Protein supports only C, N, O, S, this works. lowerCamelCase__ : List[str] = '' lowerCamelCase__ : str = 'A' if chain_index is not None: lowerCamelCase__ : List[Any] = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowerCamelCase__ : Union[str, Any] = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(_UpperCAmelCase ) atom_index += 1 lowerCamelCase__ : Dict = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowerCamelCase__ : List[Any] = True lowerCamelCase__ : List[str] = chain_index[i + 1] if should_terminate: # Close the chain. lowerCamelCase__ : int = 'TER' lowerCamelCase__ : Any = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(_UpperCAmelCase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(_UpperCAmelCase , _UpperCAmelCase ) ) pdb_lines.append('END' ) pdb_lines.append('' ) return "\n".join(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ) -> Protein: return Protein( aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=_UpperCAmelCase , remark=_UpperCAmelCase , parents=_UpperCAmelCase , parents_chain_index=_UpperCAmelCase , )
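# Rough round-trip sketch (illustrative only; the obfuscation collapsed every
# module-level function here to `SCREAMING_SNAKE_CASE`, so this assumes the
# original, hypothetical names):
#   prot = from_proteinnet_string(proteinnet_str)  # parse [PRIMARY]/[TERTIARY]/[MASK]
#   pdb_text = to_pdb(prot)                        # serialise back to PDB text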
188
1
from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class UpperCamelCase_ ( yaml.SafeLoader ): def _snake_case ( self :List[str] , __A :List[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.constructed_objects[key_node] for key_node, _ in node.value] SCREAMING_SNAKE_CASE__ = [tuple(__A ) if isinstance(__A , __A ) else key for key in keys] SCREAMING_SNAKE_CASE__ = Counter(__A ) SCREAMING_SNAKE_CASE__ = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' ) def _snake_case ( self :Optional[int] , __A :List[Any] , __A :Union[str, Any]=False ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().construct_mapping(__A , deep=__A ) self._check_no_duplicates_on_constructed_node(__A ) return mapping def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: SCREAMING_SNAKE_CASE__ = full_content[1:].index("""---""" ) + 1 SCREAMING_SNAKE_CASE__ = """\n""".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(UpperCamelCase__ ) class UpperCamelCase_ ( UpperCamelCase__ ): # class attributes lowerCamelCase_ = {"train_eval_index"} # train-eval-index in the YAML metadata @classmethod def _snake_case ( cls :int , __A :Path ) -> "DatasetMetadata": """simple docstring""" with open(__A , encoding="""utf-8""" ) as readme_file: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__A ) else: return cls() def _snake_case ( self :int , __A :Path ) -> str: """simple docstring""" if path.exists(): with open(__A , encoding="""utf-8""" ) as readme_file: SCREAMING_SNAKE_CASE__ = readme_file.read() else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = self._to_readme(__A ) with open(__A , """w""" , encoding="""utf-8""" ) as readme_file: readme_file.write(__A ) def _snake_case ( self :int , __A :Optional[str] = None ) -> str: """simple docstring""" if readme_content is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _split_yaml_from_readme(__A ) SCREAMING_SNAKE_CASE__ = """---\n""" + self.to_yaml_string() + """---\n""" + content else: SCREAMING_SNAKE_CASE__ = """---\n""" + self.to_yaml_string() + """---\n""" return full_content @classmethod def _snake_case ( cls :Tuple , __A :str ) -> "DatasetMetadata": """simple docstring""" SCREAMING_SNAKE_CASE__ = yaml.load(__A , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields SCREAMING_SNAKE_CASE__ = { (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__A ) def _snake_case ( self :Union[str, Any] ) -> str: """simple docstring""" return yaml.safe_dump( { (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__A , allow_unicode=__A , encoding="""utf-8""" , ).decode("""utf-8""" ) _lowerCamelCase = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 
'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser _lowerCamelCase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') _lowerCamelCase = ap.parse_args() _lowerCamelCase = Path(args.readme_filepath) _lowerCamelCase = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
6
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "OwlViTImageProcessor" lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): SCREAMING_SNAKE_CASE__ = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A )) SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = input_ids SCREAMING_SNAKE_CASE__ = attention_mask if 
query_images is not None: SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = self.image_processor( __A , return_tensors=__A , **__A ).pixel_values SCREAMING_SNAKE_CASE__ = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*__A , **__A ) def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*__A , **__A ) def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :List[Any] ) -> Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
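# Typical call pattern for the upstream OwlViTProcessor that this class
# mirrors (illustrative; `processor` and `image` are assumed objects):
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")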
6
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available lowercase : int = { '''configuration_audio_spectrogram_transformer''': [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ASTConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ASTForAudioClassification''', '''ASTModel''', '''ASTPreTrainedModel''', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[int] = ['''ASTFeatureExtractor'''] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
707
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but uses more memory. lowercase : Optional[Any] = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowercase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowercase : Union[str, Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def lowerCAmelCase__ ( _a : str , _a : str ): snake_case_ : Union[str, Any] = len([g for position, g in enumerate(_a ) if g == main_target[position]] ) return (item, float(_a )) def lowerCAmelCase__ ( _a : str , _a : str ): snake_case_ : Tuple = random.randint(0 , len(_a ) - 1 ) snake_case_ : Any = parent_a[:random_slice] + parent_a[random_slice:] snake_case_ : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def lowerCAmelCase__ ( _a : str , _a : list[str] ): snake_case_ : str = list(_a ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: snake_case_ : Optional[Any] = random.choice(_a ) return "".join(_a ) def lowerCAmelCase__ ( _a : tuple[str, float] , _a : list[tuple[str, float]] , _a : list[str] , ): snake_case_ : Tuple = [] # Generate more children proportionally to the fitness score. snake_case_ : Optional[int] = int(parent_a[1] * 1_00 ) + 1 snake_case_ : Tuple = 10 if child_n >= 10 else child_n for _ in range(_a ): snake_case_ : int = population_score[random.randint(0 , _a )][0] snake_case_ , snake_case_ : Any = crossover(parent_a[0] , _a ) # Append new string to the population list. pop.append(mutate(_a , _a ) ) pop.append(mutate(_a , _a ) ) return pop def lowerCAmelCase__ ( _a : str , _a : list[str] , _a : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: snake_case_ : Any = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(_a ) # Verify that the target contains no genes besides the ones inside genes variable. snake_case_ : Optional[Any] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: snake_case_ : str = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(_a ) # Generate random starting population. snake_case_ : Optional[int] = [] for _ in range(_a ): population.append("".join([random.choice(_a ) for i in range(len(_a ) )] ) ) # Just some logs to know what the algorithm is doing. snake_case_ , snake_case_ : Dict = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_a ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. snake_case_ : Optional[int] = [evaluate(_a , _a ) for item in population] # Check if there is a matching evolution.
snake_case_ : Any = sorted(_a , key=lambda _a : _a[1] , reverse=_a ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generations. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population: {total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoids regression of evolution. snake_case_ : Union[str, Any] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_a ) # Normalize population score to be between 0 and 1. snake_case_ : int = [ (item, score / len(_a )) for item, score in population_score ] # This is the selection step for i in range(_a ): population.extend(select(population_score[int(_a )] , _a , _a ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # far fewer generations. if len(_a ) > N_POPULATION: break if __name__ == "__main__": lowercase : Dict = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowercase : Optional[int] = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowercase ,lowercase ,lowercase : Optional[Any] = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
114
0
import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : int ) -> Any: """simple docstring""" A__ = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } A__ = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(__lowerCAmelCase ) , __lowerCAmelCase ) def a_ ( self : str ) -> Optional[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , x.transpose() ) ) A__ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) ) @require_torch def a_ ( self : int ) -> List[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_tf def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , transpose(__lowerCAmelCase ).numpy() ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , transpose(__lowerCAmelCase , axes=(1, 2, 0) ).numpy() ) ) @require_flax def a_ ( self : str ) -> List[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase ) , np.asarray(transpose(__lowerCAmelCase ) ) ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__lowerCAmelCase , axes=(1, 2, 0) ) ) ) ) def a_ ( self : Tuple ) -> Any: """simple docstring""" A__ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.reshape(__lowerCAmelCase , (4, 3) ) ) ) 
A__ = np.random.randn(3 , 4 , 5 ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , np.reshape(__lowerCAmelCase , (12, 5) ) ) ) @require_torch def a_ ( self : Tuple ) -> Any: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , reshape(__lowerCAmelCase , (12, 5) ).numpy() ) ) @require_tf def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , reshape(__lowerCAmelCase , (4, 3) ).numpy() ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , reshape(__lowerCAmelCase , (12, 5) ).numpy() ) ) @require_flax def a_ ( self : List[str] ) -> Any: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (4, 3) ) , np.asarray(reshape(__lowerCAmelCase , (4, 3) ) ) ) ) A__ = np.random.randn(3 , 4 , 5 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(reshape(__lowerCAmelCase , (12, 5) ) , np.asarray(reshape(__lowerCAmelCase , (12, 5) ) ) ) ) def a_ ( self : List[str] ) -> Optional[int]: """simple docstring""" A__ = np.random.randn(1 , 3 , 4 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.squeeze(__lowerCAmelCase ) ) ) A__ = np.random.randn(1 , 4 , 1 , 5 ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.squeeze(__lowerCAmelCase , axis=2 ) ) ) @require_torch def a_ ( self : Any ) -> int: """simple docstring""" A__ = np.random.randn(1 , 3 , 4 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) A__ = np.random.randn(1 , 4 , 1 , 5 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_tf def a_ ( self : int ) -> Optional[int]: """simple docstring""" A__ = np.random.randn(1 , 3 , 4 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , squeeze(__lowerCAmelCase ).numpy() ) ) A__ = np.random.randn(1 , 4 , 1 , 5 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , squeeze(__lowerCAmelCase , axis=2 ).numpy() ) ) @require_flax def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" A__ = np.random.randn(1 , 3 , 4 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase ) , np.asarray(squeeze(__lowerCAmelCase ) ) ) ) A__ = np.random.randn(1 , 4 , 1 , 5 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(squeeze(__lowerCAmelCase , axis=2 ) , np.asarray(squeeze(__lowerCAmelCase , axis=2 ) ) ) ) def a_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.expand_dims(__lowerCAmelCase , axis=1 ) ) ) @require_torch def a_ ( self : List[Any] ) -> Any: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = torch.tensor(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , 
expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_tf def a_ ( self : int ) -> Optional[Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = tf.constant(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , expand_dims(__lowerCAmelCase , axis=1 ).numpy() ) ) @require_flax def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = np.random.randn(3 , 4 ) A__ = jnp.array(__lowerCAmelCase ) self.assertTrue(np.allclose(expand_dims(__lowerCAmelCase , axis=1 ) , np.asarray(expand_dims(__lowerCAmelCase , axis=1 ) ) ) )
176
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Dict = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
176
1
def lowerCamelCase__ ( _lowerCamelCase = 10**9 ): _UpperCAmelCase =1 _UpperCAmelCase =2 _UpperCAmelCase =0 _UpperCAmelCase =0 _UpperCAmelCase =0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value _UpperCAmelCase =2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F"""{solution() = }""")
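# Worked check (this recurrence appears to enumerate almost-equilateral
# triangles (a, a, a±1) with integral area, as in Project Euler 94; that
# reading is an assumption based on the generated perimeters):
# >>> lowerCamelCase__(50)
# 66  # perimeters 16 (5,5,6) and 50 (17,17,16)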
700
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _a ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) _UpperCAmelCase =tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above _UpperCAmelCase =tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above _UpperCAmelCase =tf_top_k_top_p_filtering(_snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) _UpperCAmelCase =output[output != -float("inf" )] _UpperCAmelCase =tf.cast( tf.where(tf.not_equal(_snake_case , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-1_2 ) tf.debugging.assert_equal(_snake_case , _snake_case ) @require_tf class _a ( unittest.TestCase , A__ ): """simple docstring""" if is_tf_available(): snake_case ={ """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def SCREAMING_SNAKE_CASE ( self ): # TF-only test: tf.saved_model export _UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCAmelCase =2 _UpperCAmelCase =2 class _a ( tf.Module ): """simple docstring""" def __init__( self , _snake_case ): super(_snake_case , self ).__init__() _UpperCAmelCase =model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ), tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ), ) , jit_compile=_snake_case , ) def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ): _UpperCAmelCase =self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase =[[2, 0], [102, 103]] _UpperCAmelCase =[[1, 0], [1, 1]] _UpperCAmelCase =DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} ) _UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"] for batch_size in range(1 , len(_snake_case ) + 1 ): _UpperCAmelCase ={ "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } _UpperCAmelCase =serving_func(**_snake_case )["sequences"] _UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow def SCREAMING_SNAKE_CASE ( self ): # TF-only test: tf.saved_model export _UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCAmelCase =1 _UpperCAmelCase =2 class _a ( tf.Module ): """simple docstring""" def __init__( self , _snake_case ): super(_snake_case , self ).__init__() _UpperCAmelCase =model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ), tf.TensorSpec((batch_size, None) , 
tf.intaa , name="attention_mask" ), ) , jit_compile=_snake_case , ) def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case ): _UpperCAmelCase =self.model.generate( input_ids=_snake_case , attention_mask=_snake_case , max_new_tokens=_snake_case , return_dict_in_generate=_snake_case , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase =[[2], [102, 103]] _UpperCAmelCase =[[1], [1, 1]] _UpperCAmelCase =DummyModel(model=_snake_case ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(_snake_case , _snake_case , signatures={"serving_default": dummy_model.serving} ) _UpperCAmelCase =tf.saved_model.load(_snake_case ).signatures["serving_default"] for input_row in range(len(_snake_case ) ): _UpperCAmelCase ={ "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } _UpperCAmelCase =serving_func(**_snake_case )["sequences"] _UpperCAmelCase =test_model.generate(**_snake_case , max_new_tokens=_snake_case ) tf.debugging.assert_equal(_snake_case , _snake_case ) @slow @require_tensorflow_text def SCREAMING_SNAKE_CASE ( self ): # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_snake_case ) class _a ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self ): super().__init__() _UpperCAmelCase =text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(_snake_case , "spiece.model" ) , "rb" ).read() ) _UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" ) def SCREAMING_SNAKE_CASE ( self , _snake_case , *_snake_case , **_snake_case ): _UpperCAmelCase =self.tokenizer.tokenize(_snake_case ) _UpperCAmelCase , _UpperCAmelCase =text.pad_model_inputs( _snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) _UpperCAmelCase =self.model.generate(input_ids=_snake_case , attention_mask=_snake_case ) return self.tokenizer.detokenize(_snake_case ) _UpperCAmelCase =CompleteSentenceTransformer() _UpperCAmelCase =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" ) _UpperCAmelCase =complete_model(_snake_case ) _UpperCAmelCase =tf.keras.Model(_snake_case , _snake_case ) keras_model.save(_snake_case ) def SCREAMING_SNAKE_CASE ( self ): # Has PT equivalent: this test relies on random sampling _UpperCAmelCase ={ "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } _UpperCAmelCase =14 _UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCAmelCase ="Hello, my dog is cute and" _UpperCAmelCase =tokenizer(_snake_case , return_tensors="tf" ) _UpperCAmelCase =TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _UpperCAmelCase =638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) _UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) _UpperCAmelCase =[638, 198] with tf.device(":/CPU:0" ): tf.random.set_seed(0 ) _UpperCAmelCase =model.generate(**_snake_case , eos_token_id=_snake_case , **_snake_case ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def SCREAMING_SNAKE_CASE ( self ): # Has PT equivalent: ample use of framework-specific code _UpperCAmelCase 
=AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" ) _UpperCAmelCase ="Hugging Face is a technology company based in New York and Paris." _UpperCAmelCase =bart_tokenizer(_snake_case , return_tensors="tf" ).input_ids _UpperCAmelCase =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" ) _UpperCAmelCase =bart_model.generate(_snake_case ).numpy() class _a ( A__ ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=None , **_snake_case ): return super().call(_snake_case , **_snake_case ) _UpperCAmelCase =FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" ) _UpperCAmelCase =bart_model.generate(_snake_case , foo="bar" ).numpy() self.assertTrue(np.array_equal(_snake_case , _snake_case ) ) class _a ( bart_model.model.encoder.__class__ ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self , _snake_case , **_snake_case ): return super().call(_snake_case , **_snake_case ) _UpperCAmelCase =FakeEncoder(bart_model.config , bart_model.model.shared ) _UpperCAmelCase =fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) _UpperCAmelCase =bart_model.generate(_snake_case ).numpy() with self.assertRaises(_snake_case ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(_snake_case , foo="bar" )
style_context_codestyle: 592
label: 0
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def snake_case( __magic_name__ ) -> Optional[int]: '''simple docstring''' if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _A ( nn.Module ): def __init__( self : Union[str, Any] , _A : nn.Module , _A : int ) -> Dict: """simple docstring""" super().__init__() lowercase : str = module lowercase : List[Any] = nn.Sequential( nn.Linear(module.in_features , _A , bias=_A ) , nn.Linear(_A , module.out_features , bias=_A ) , ) lowercase : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_A ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def __a ( self : str , _A : Optional[Any] , *_A : str , **_A : List[str] ) -> Optional[int]: """simple docstring""" return self.module(_A , *_A , **_A ) + self.adapter(_A ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _A ( unittest.TestCase ): _UpperCamelCase : List[str] = '''bigscience/bloom-1b7''' # Constant values _UpperCamelCase : List[Any] = 2.1_09_65_95_52_69_25_74 _UpperCamelCase : Dict = '''Hello my name is''' _UpperCamelCase : Dict = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _UpperCamelCase : int = 1_0 def __a ( self : Dict ) -> Optional[Any]: """simple docstring""" lowercase : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class _A ( __A ): def __a ( self : Dict ) -> int: """simple docstring""" super().setUp() # Models and tokenizer lowercase : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) lowercase : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A , device_map='''auto''' ) def __a ( self : List[str] ) -> Dict: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ) -> Tuple: """simple docstring""" lowercase : Optional[int] = self.model_abit.config self.assertTrue(hasattr(_A , '''quantization_config''' ) ) lowercase : Union[str, Any] = config.to_dict() lowercase : str = config.to_diff_dict() lowercase : Union[str, Any] = config.to_json_string() def __a ( self : Dict ) -> Optional[int]: """simple docstring""" from bitsandbytes.nn import Paramsabit lowercase : int = self.model_fpaa.get_memory_footprint() lowercase : int = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase : Union[str, Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def __a ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_A , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase : Tuple = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) def __a ( self : Optional[int] ) -> int: """simple docstring""" lowercase : Tuple = BitsAndBytesConfig() lowercase : Optional[int] = True lowercase : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_A , device_map='''auto''' ) lowercase : int = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase : Tuple = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) def __a ( self : Any ) -> str: """simple docstring""" with self.assertRaises(_A ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_A ) def __a ( self : List[str] ) -> Tuple: """simple docstring""" lowercase : int = BitsAndBytesConfig() with self.assertRaises(_A ): lowercase : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_A , load_in_abit=_A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def __a ( self : Any ) -> Tuple: """simple docstring""" with self.assertRaises(_A ): # Tries with 
`str` self.model_abit.to('''cpu''' ) with self.assertRaises(_A ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_A ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(_A ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_A ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase : List[str] = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase : Optional[Any] = self.model_fpaa.to(torch.floataa ) lowercase : Optional[Any] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowercase : Tuple = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error lowercase : Dict = self.model_fpaa.half() # Check this does not throw an error lowercase : str = self.model_fpaa.float() def __a ( self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase : int = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_A , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _A ( unittest.TestCase ): @classmethod def __a ( cls : str ) -> str: """simple docstring""" lowercase : List[str] = "t5-small" lowercase : Optional[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense lowercase : Any = AutoTokenizer.from_pretrained(cls.model_name ) lowercase : Optional[Any] = "Translate in German: Hello, my dog is cute" def __a ( self : Union[str, Any] ) -> Any: """simple docstring""" gc.collect() torch.cuda.empty_cache() def __a ( self : List[str] ) -> Union[str, Any]: """simple docstring""" from transformers import TaForConditionalGeneration lowercase : Any = TaForConditionalGeneration._keep_in_fpaa_modules lowercase : Dict = None # test with `t5-small` lowercase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_A , device_map='''auto''' ) lowercase : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase : Any = model.generate(**_A ) # test with `flan-t5-small` lowercase : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_A , device_map='''auto''' ) lowercase : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase : Optional[int] = model.generate(**_A ) lowercase : Tuple = modules def __a ( self : int ) -> Dict: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase : Optional[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_A , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowercase : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase : Optional[Any] = model.generate(**_A ) # test with `flan-t5-small` lowercase : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_A , device_map='''auto''' ) lowercase : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase : Tuple = model.generate(**_A ) class _A ( __A ): def __a ( self : Any ) -> Tuple: """simple docstring""" 
super().setUp() # model_name lowercase : Optional[int] = "bigscience/bloom-560m" lowercase : List[str] = "t5-small" # Different types of model lowercase : str = AutoModel.from_pretrained(self.model_name , load_in_abit=_A , device_map='''auto''' ) # Sequence classification model lowercase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_A , device_map='''auto''' ) # CausalLM model lowercase : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A , device_map='''auto''' ) # Seq2seq model lowercase : List[str] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_A , device_map='''auto''' ) def __a ( self : Optional[int] ) -> Dict: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __a ( self : List[Any] ) -> List[str]: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class _A ( __A ): def __a ( self : Dict ) -> Dict: """simple docstring""" super().setUp() def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def __a ( self : Dict ) -> Dict: """simple docstring""" lowercase : Optional[Any] = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowercase : Any = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class _A ( __A ): def __a ( self : List[Any] ) -> List[str]: """simple docstring""" super().setUp() def __a ( self : List[Any] ) -> Any: """simple docstring""" lowercase : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_A , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowercase : Tuple = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch lowercase : Tuple = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) class _A ( __A ): def __a ( self : List[str] ) -> Dict: """simple docstring""" lowercase : str = "facebook/opt-350m" super().setUp() def __a ( self : int ) -> List[Any]: """simple docstring""" if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters lowercase : Dict = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowercase : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowercase : Union[str, Any] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_A ) ): lowercase : Optional[Any] = LoRALayer(module.q_proj , rank=16 ) lowercase : Optional[int] = LoRALayer(module.k_proj , rank=16 ) lowercase : Optional[int] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowercase : Dict = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase : str = model.forward(**_A ) out.logits.norm().backward() for module in model.modules(): if isinstance(_A , _A ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_A , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class _A ( __A ): _UpperCamelCase : int = '''gpt2-xl''' _UpperCamelCase : Optional[Any] = 3.31_91_85_48_54_15_21_87
code_codestyle: 217
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos",
        "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout",
        "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
style_context_codestyle: 25
label: 0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" snake_case__ = "speech_to_text" snake_case__ = ["past_key_values"] snake_case__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=10_000 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : Optional[int]=2_048 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Any=2_048 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]="relu" , SCREAMING_SNAKE_CASE__ : Tuple=256 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=2 , SCREAMING_SNAKE_CASE__ : Tuple=6_000 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_024 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(5, 5) , SCREAMING_SNAKE_CASE__ : Any=1_024 , SCREAMING_SNAKE_CASE__ : str=80 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , **SCREAMING_SNAKE_CASE__ : int , ) -> List[str]: lowerCAmelCase__ = vocab_size lowerCAmelCase__ = d_model lowerCAmelCase__ = encoder_ffn_dim lowerCAmelCase__ = encoder_layers lowerCAmelCase__ = encoder_attention_heads lowerCAmelCase__ = decoder_ffn_dim lowerCAmelCase__ = decoder_layers lowerCAmelCase__ = decoder_attention_heads lowerCAmelCase__ = dropout lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = activation_function lowerCAmelCase__ = init_std lowerCAmelCase__ = encoder_layerdrop lowerCAmelCase__ = decoder_layerdrop lowerCAmelCase__ = use_cache lowerCAmelCase__ = encoder_layers lowerCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowerCAmelCase__ = max_source_positions lowerCAmelCase__ = max_target_positions lowerCAmelCase__ = num_conv_layers lowerCAmelCase__ = list(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = conv_channels lowerCAmelCase__ = input_feat_per_channel lowerCAmelCase__ = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` " f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ' f'`config.num_conv_layers = {self.num_conv_layers}`.' ) super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
code_codestyle: 125
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=-1 ) -> str: # in NER datasets, the last column is usually reserved for NER label lowerCAmelCase__ = label_idx def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = mode.value lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' ) lowerCAmelCase__ = 1 lowerCAmelCase__ = [] with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f: lowerCAmelCase__ = [] lowerCAmelCase__ = [] for line in f: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) guid_index += 1 lowerCAmelCase__ = [] lowerCAmelCase__ = [] else: lowerCAmelCase__ = line.split(" " ) words.append(splits[0] ) if len(SCREAMING_SNAKE_CASE__ ) > 1: labels.append(splits[self.label_idx].replace("\n" , "" ) ) else: # Examples could have no label for mode = "test" labels.append("O" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) return examples def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> Dict: lowerCAmelCase__ = 0 for line in test_input_reader: if line.startswith("-DOCSTART-" ) or line == "" or line == "\n": writer.write(SCREAMING_SNAKE_CASE__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowerCAmelCase__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n" writer.write(SCREAMING_SNAKE_CASE__ ) else: logger.warning("Maximum sequence length exceeded: No prediction for '%s'." 
, line.split()[0] ) def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: lowerCAmelCase__ = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ = ["O"] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Dict ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def a ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: lowerCAmelCase__ = f.read().splitlines() if "O" not in labels: lowerCAmelCase__ = ["O"] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[Split, str] ) -> List[InputExample]: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = mode.value lowerCAmelCase__ = os.path.join(SCREAMING_SNAKE_CASE__ , f'{mode}.txt' ) lowerCAmelCase__ = 1 lowerCAmelCase__ = [] with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as f: for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = [] lowerCAmelCase__ = [] for token in sentence: words.append(token["form"] ) labels.append(token["upos"] ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) ) guid_index += 1 return examples def a ( self : int , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : TextIO , SCREAMING_SNAKE_CASE__ : List ) -> int: lowerCAmelCase__ = 0 for sentence in parse_incr(SCREAMING_SNAKE_CASE__ ): lowerCAmelCase__ = preds_list[example_id] lowerCAmelCase__ = "" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(SCREAMING_SNAKE_CASE__ ) example_id += 1 def a ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: if path: with open(SCREAMING_SNAKE_CASE__ , "r" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
style_context_codestyle: 125
label: 1
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
code_codestyle: 45
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 495
label: 0
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a__ : Optional[Any] = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } a__ : List[Any] = logging.get_logger(__name__) class __magic_name__ ( _UpperCamelCase ): UpperCamelCase : Union[str, Any] = "mask2former" UpperCamelCase : Union[str, Any] = ["swin"] UpperCamelCase : str = {"hidden_size": "hidden_dim"} def __init__( self , __magic_name__ = None , __magic_name__ = 2_5_6 , __magic_name__ = 2_5_6 , __magic_name__ = 2_5_6 , __magic_name__ = 1_0_2_4 , __magic_name__ = "relu" , __magic_name__ = 6 , __magic_name__ = 1_0 , __magic_name__ = 8 , __magic_name__ = 0.0 , __magic_name__ = 2_0_4_8 , __magic_name__ = False , __magic_name__ = False , __magic_name__ = 4 , __magic_name__ = 2_5_5 , __magic_name__ = 1_0_0 , __magic_name__ = 0.1 , __magic_name__ = 2.0 , __magic_name__ = 5.0 , __magic_name__ = 5.0 , __magic_name__ = 1_2_5_4_4 , __magic_name__ = 3.0 , __magic_name__ = 0.75 , __magic_name__ = 0.02 , __magic_name__ = 1.0 , __magic_name__ = True , __magic_name__ = [4, 8, 1_6, 3_2] , __magic_name__ = None , **__magic_name__ , ): """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) _lowerCAmelCase = CONFIG_MAPPING['swin']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__magic_name__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(__magic_name__ , __magic_name__ ): _lowerCAmelCase = backbone_config.pop('model_type' ) _lowerCAmelCase = CONFIG_MAPPING[backbone_model_type] _lowerCAmelCase = config_class.from_dict(__magic_name__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
''' F'''Supported model types: {",".join(self.backbones_supported )}''' ) _lowerCAmelCase = backbone_config _lowerCAmelCase = feature_size _lowerCAmelCase = mask_feature_size _lowerCAmelCase = hidden_dim _lowerCAmelCase = encoder_feedforward_dim _lowerCAmelCase = activation_function _lowerCAmelCase = encoder_layers _lowerCAmelCase = decoder_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = dropout _lowerCAmelCase = dim_feedforward _lowerCAmelCase = pre_norm _lowerCAmelCase = enforce_input_projection _lowerCAmelCase = common_stride _lowerCAmelCase = ignore_value _lowerCAmelCase = num_queries _lowerCAmelCase = no_object_weight _lowerCAmelCase = class_weight _lowerCAmelCase = mask_weight _lowerCAmelCase = dice_weight _lowerCAmelCase = train_num_points _lowerCAmelCase = oversample_ratio _lowerCAmelCase = importance_sample_ratio _lowerCAmelCase = init_std _lowerCAmelCase = init_xavier_std _lowerCAmelCase = use_auxiliary_loss _lowerCAmelCase = feature_strides _lowerCAmelCase = output_auxiliary_logits _lowerCAmelCase = decoder_layers super().__init__(**__magic_name__ ) @classmethod def _lowerCamelCase ( cls , __magic_name__ , **__magic_name__ ): """simple docstring""" return cls( backbone_config=__magic_name__ , **__magic_name__ , ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = copy.deepcopy(self.__dict__ ) _lowerCAmelCase = self.backbone_config.to_dict() _lowerCAmelCase = self.__class__.model_type return output
code_codestyle: 309
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __magic_name__ : def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=3_0 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=3_7 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=1_0 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=0.6 , __magic_name__=None , ): """simple docstring""" _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = mask_ratio _lowerCAmelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowerCAmelCase = (image_size // patch_size) ** 2 _lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" _lowerCAmelCase = ViTMAEModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() _lowerCAmelCase = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" _lowerCAmelCase = ViTMAEForPreTraining(__magic_name__ ) 
model.to(__magic_name__ ) model.eval() _lowerCAmelCase = model(__magic_name__ ) _lowerCAmelCase = (self.image_size // self.patch_size) ** 2 _lowerCAmelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _lowerCAmelCase = 1 _lowerCAmelCase = ViTMAEForPreTraining(__magic_name__ ) model.to(__magic_name__ ) model.eval() _lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase = model(__magic_name__ ) _lowerCAmelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ): UpperCamelCase : Dict = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () UpperCamelCase : Dict = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} UpperCamelCase : int = False UpperCamelCase : Optional[int] = False UpperCamelCase : Any = False UpperCamelCase : Optional[Any] = False def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = ViTMAEModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=3_7 ) def _lowerCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def _lowerCamelCase ( self ): """simple docstring""" pass def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__magic_name__ ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ): """simple docstring""" np.random.seed(2 ) _lowerCAmelCase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowerCAmelCase = torch.from_numpy(__magic_name__ ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowerCAmelCase = pt_noise super().check_pt_tf_models(__magic_name__ , __magic_name__ , __magic_name__ ) def _lowerCamelCase ( self ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) _lowerCAmelCase = outputs[0].cpu().numpy() _lowerCAmelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__magic_name__ ) _lowerCAmelCase = model_class.from_pretrained(__magic_name__ ) model.to(__magic_name__ ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) # Make sure we don't have nans _lowerCAmelCase = after_outputs[0].cpu().numpy() _lowerCAmelCase = 0 _lowerCAmelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__magic_name__ , 1e-5 ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _lowerCamelCase ( self ): """simple docstring""" pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _lowerCamelCase ( self ): """simple docstring""" pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _lowerCamelCase ( self ): """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def _lowerCamelCase ( self ): """simple docstring""" pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _lowerCamelCase ( self ): """simple docstring""" pass @slow def _lowerCamelCase ( self ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = ViTMAEModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def A__ ( ): """simple docstring""" _lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): @cached_property def _lowerCamelCase ( self ): """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def _lowerCamelCase ( self ): """simple docstring""" np.random.seed(2 ) _lowerCAmelCase = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(__magic_name__ ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=__magic_name__ , return_tensors='pt' ).to(__magic_name__ ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowerCAmelCase = ViTMAEConfig() _lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _lowerCAmelCase = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _lowerCAmelCase = model(**__magic_name__ , noise=torch.from_numpy(__magic_name__ ).to(device=__magic_name__ ) ) # verify the logits _lowerCAmelCase = torch.Size((1, 1_9_6, 7_6_8) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) _lowerCAmelCase = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__magic_name__ ) , atol=1e-4 ) )
style_context_codestyle: 309
label: 1
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput __SCREAMING_SNAKE_CASE = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowerCAmelCase__ ( _lowerCAmelCase ): """simple docstring""" def __init__( self : str , *A__ : Dict , A__ : Any=None , A__ : List[str]=None , A__ : List[str]=None , **A__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) a__ : str = eval_examples a__ : List[Any] = post_process_function a__ : List[Any] = quant_trainer_args a__ : List[Any] = 1_2_8 # default number of calibration samples def __lowerCAmelCase ( self : Dict , A__ : str=None ) -> List[str]: '''simple docstring''' if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) a__ : Dict = calib_dataset if calib_dataset is not None else self.calib_dataset a__ : Dict = self._remove_unused_columns(UpperCAmelCase_ , description='''Calibration''' ) return DataLoader( UpperCAmelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCAmelCase_ , ) def __lowerCAmelCase ( self : List[str] , A__ : Tuple=None ) -> Union[str, Any]: '''simple docstring''' a__ : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset a__ : Optional[int] = self.get_calib_dataloader(UpperCAmelCase_ ) a__ : str = self.model quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args , calib=UpperCAmelCase_ ) model.eval() quant_trainer.enable_calibration(UpperCAmelCase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F' Num examples = {self.calib_num}' ) logger.info(F' Batch size = {calib_dataloader.batch_size}' ) for step, inputs in enumerate(UpperCAmelCase_ ): # Prediction step a__ , a__ , a__ : str = self.prediction_step(UpperCAmelCase_ , UpperCAmelCase_ , prediction_loss_only=UpperCAmelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(UpperCAmelCase_ , self.quant_trainer_args ) a__ : List[Any] = model def __lowerCAmelCase ( self : List[Any] , A__ : Optional[Any]=None , A__ : str=None , A__ : List[str]=None , A__ : str = "eval" ) -> Tuple: '''simple docstring''' a__ : str = self.eval_dataset if eval_dataset is None else eval_dataset a__ : List[str] = self.get_eval_dataloader(UpperCAmelCase_ ) a__ : int = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
a__ : List[Any] = self.compute_metrics a__ : List[str] = None a__ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: a__ : List[str] = eval_loop( UpperCAmelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , ) finally: a__ : str = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: a__ : int = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions ) a__ : Union[str, Any] = self.compute_metrics(UpperCAmelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): a__ : Union[str, Any] = metrics.pop(UpperCAmelCase_ ) self.log(UpperCAmelCase_ ) else: a__ : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) a__ : Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase_ ) return metrics def __lowerCAmelCase ( self : Optional[int] , A__ : Tuple , A__ : str , A__ : Tuple=None , A__ : str = "test" ) -> List[str]: '''simple docstring''' a__ : str = self.get_test_dataloader(UpperCAmelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. a__ : List[str] = self.compute_metrics a__ : Tuple = None a__ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: a__ : int = eval_loop( UpperCAmelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase_ , ) finally: a__ : List[Any] = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output a__ : Optional[int] = self.post_process_function(UpperCAmelCase_ , UpperCAmelCase_ , output.predictions , '''predict''' ) a__ : List[Any] = self.compute_metrics(UpperCAmelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): a__ : List[Any] = metrics.pop(UpperCAmelCase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , A__ : Union[str, Any]="./" ) -> Optional[int]: '''simple docstring''' a__ : List[str] = self.eval_dataset a__ : Optional[Any] = self.get_eval_dataloader(UpperCAmelCase_ ) a__ : Optional[Any] = next(iter(UpperCAmelCase_ ) ) # saving device - to make it consistent a__ : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple a__ : Optional[Any] = tuple(v.to(UpperCAmelCase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer a__ : str = True a__ : Tuple = self.model.to(UpperCAmelCase_ ) model.eval() model.float() a__ : List[Any] = model.module if hasattr(UpperCAmelCase_ , '''module''' ) else model quant_trainer.configure_model(UpperCAmelCase_ , self.quant_trainer_args ) a__ : Tuple = os.path.join(UpperCAmelCase_ , '''model.onnx''' ) logger.info(F'exporting model to {output_model_file}' ) a__ : int = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , export_params=UpperCAmelCase_ , opset_version=1_3 , 
do_constant_folding=UpperCAmelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=UpperCAmelCase_ , ) logger.info('''onnx export finished''' )
code_codestyle: 688
"""simple docstring""" import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class __a ( _lowerCAmelCase ): def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any )-> None: """simple docstring""" warnings.warn( "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use YolosImageProcessor instead." , UpperCAmelCase_ , ) super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
style_context_codestyle: 554
label: 0
from __future__ import annotations import math class lowerCamelCase__ : def __init__( self : List[str] , __a : int ): '''simple docstring''' lowerCamelCase__: int = size # approximate the overall size of segment tree with given value lowerCamelCase__: Optional[Any] = [0 for i in range(0 , 4 * size )] # create array to store lazy update lowerCamelCase__: str = [0 for i in range(0 , 4 * size )] lowerCamelCase__: Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update def lowerCamelCase_ ( self : Optional[Any] , __a : int ): '''simple docstring''' return idx * 2 def lowerCamelCase_ ( self : int , __a : int ): '''simple docstring''' return idx * 2 + 1 def lowerCamelCase_ ( self : Union[str, Any] , __a : int , __a : int , __a : int , __a : list[int] ): '''simple docstring''' if left_element == right_element: lowerCamelCase__: Dict = a[left_element - 1] else: lowerCamelCase__: List[str] = (left_element + right_element) // 2 self.build(self.left(__a ) , __a , __a , __a ) self.build(self.right(__a ) , mid + 1 , __a , __a ) lowerCamelCase__: str = max( self.segment_tree[self.left(__a )] , self.segment_tree[self.right(__a )] ) def lowerCamelCase_ ( self : Union[str, Any] , __a : int , __a : int , __a : int , __a : int , __a : int , __a : int ): '''simple docstring''' if self.flag[idx] is True: lowerCamelCase__: List[str] = self.lazy[idx] lowerCamelCase__: Optional[Any] = False if left_element != right_element: lowerCamelCase__: str = self.lazy[idx] lowerCamelCase__: List[str] = self.lazy[idx] lowerCamelCase__: Optional[int] = True lowerCamelCase__: List[str] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowerCamelCase__: Optional[int] = val if left_element != right_element: lowerCamelCase__: Optional[int] = val lowerCamelCase__: Optional[int] = val lowerCamelCase__: Tuple = True lowerCamelCase__: Tuple = True return True lowerCamelCase__: Union[str, Any] = (left_element + right_element) // 2 self.update(self.left(__a ) , __a , __a , __a , __a , __a ) self.update(self.right(__a ) , mid + 1 , __a , __a , __a , __a ) lowerCamelCase__: List[Any] = max( self.segment_tree[self.left(__a )] , self.segment_tree[self.right(__a )] ) return True def lowerCamelCase_ ( self : Dict , __a : int , __a : int , __a : int , __a : int , __a : int ): '''simple docstring''' if self.flag[idx] is True: lowerCamelCase__: str = self.lazy[idx] lowerCamelCase__: Dict = False if left_element != right_element: lowerCamelCase__: int = self.lazy[idx] lowerCamelCase__: Dict = self.lazy[idx] lowerCamelCase__: Optional[int] = True lowerCamelCase__: int = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowerCamelCase__: Dict = (left_element + right_element) // 2 lowerCamelCase__: Optional[int] = self.query(self.left(__a ) , __a , __a , __a , __a ) lowerCamelCase__: Optional[int] = self.query(self.right(__a ) , mid + 1 , __a , __a , __a ) return max(__a , __a ) def __str__( self : List[str] ): '''simple docstring''' return str([self.query(1 , 1 , self.size , __a , __a ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": _lowercase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowercase = 15 _lowercase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 
235) print(segt)
code_codestyle: 242
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class lowerCamelCase__ : def __init__( self : Any , __a : str , __a : int = 13 , __a : int = 64 , __a : int = 2 , __a : int = 3 , __a : int = 3 , __a : bool = True , __a : bool = True , __a : int = 128 , __a : Any=[16, 32, 64, 128] , __a : int = 7 , __a : int = 4 , __a : int = 37 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 10 , __a : float = 0.02 , __a : int = 2 , __a : int = 1 , __a : int = 128 , __a : List[int] = [2, 2, 2, 2] , __a : int = 2 , __a : int = 2 , ): '''simple docstring''' lowerCamelCase__: Any = parent lowerCamelCase__: Optional[int] = batch_size lowerCamelCase__: List[Any] = image_size lowerCamelCase__: Dict = patch_size lowerCamelCase__: int = num_channels lowerCamelCase__: Any = is_training lowerCamelCase__: List[Any] = use_labels lowerCamelCase__: List[Any] = hidden_size lowerCamelCase__: Optional[Any] = num_hidden_layers lowerCamelCase__: Optional[Any] = num_attention_heads lowerCamelCase__: int = intermediate_size lowerCamelCase__: Dict = hidden_act lowerCamelCase__: Any = hidden_dropout_prob lowerCamelCase__: Dict = attention_probs_dropout_prob lowerCamelCase__: Union[str, Any] = type_sequence_label_size lowerCamelCase__: List[str] = initializer_range lowerCamelCase__: List[str] = encoder_stride lowerCamelCase__: Dict = num_attention_outputs lowerCamelCase__: Dict = embed_dim lowerCamelCase__: Optional[int] = embed_dim + 1 lowerCamelCase__: str = resolution lowerCamelCase__: Dict = depths lowerCamelCase__: Optional[int] = hidden_sizes lowerCamelCase__: Any = dim lowerCamelCase__: Optional[Any] = mlp_expansion_ratio def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' lowerCamelCase__: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__: Any = None if self.use_labels: lowerCamelCase__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__: Dict = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , 
hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCamelCase_ ( self : Optional[int] , __a : str , __a : Optional[Any] , __a : Dict ): '''simple docstring''' lowerCamelCase__: int = TFEfficientFormerModel(config=__a ) lowerCamelCase__: List[str] = model(__a , training=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : int , __a : Optional[Any] , __a : List[str] , __a : Union[str, Any] ): '''simple docstring''' lowerCamelCase__: Optional[int] = self.type_sequence_label_size lowerCamelCase__: List[str] = TFEfficientFormerForImageClassification(__a ) lowerCamelCase__: List[Any] = model(__a , labels=__a , training=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__: Any = 1 lowerCamelCase__: int = TFEfficientFormerForImageClassification(__a ) lowerCamelCase__: Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__: int = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowerCamelCase__: Any = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple = config_and_inputs lowerCamelCase__: List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase__ ( A__ , A__ , unittest.TestCase ): __lowerCamelCase = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __lowerCamelCase = ( { """feature-extraction""": TFEfficientFormerModel, """image-classification""": ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False __lowerCamelCase = False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' lowerCamelCase__: Optional[int] = TFEfficientFormerModelTester(self ) lowerCamelCase__: Union[str, Any] = ConfigTester( self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' pass @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__: Optional[int] = model_class(__a ) lowerCamelCase__: int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__: Dict = [*signature.parameters.keys()] lowerCamelCase__: str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(__a : List[str] , __a : str , __a : Tuple ): lowerCamelCase__: List[Any] = model_class(__a ) lowerCamelCase__: 
Optional[Any] = model(**self._prepare_for_class(__a , __a ) , training=__a ) lowerCamelCase__: Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase__: Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(__a ) , __a ) if hasattr(self.model_tester , """encoder_seq_length""" ): lowerCamelCase__: int = self.model_tester.encoder_seq_length if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1: lowerCamelCase__: Optional[Any] = seq_length * self.model_tester.chunk_length else: lowerCamelCase__: Union[str, Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: lowerCamelCase__: Optional[Any] = outputs.decoder_hidden_states self.assertIsInstance(__a , (list, tuple) ) self.assertEqual(len(__a ) , __a ) lowerCamelCase__: List[Any] = getattr(self.model_tester , """seq_length""" , __a ) lowerCamelCase__: Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , __a ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) lowerCamelCase__ , lowerCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__: Any = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase__: str = True check_hidden_states_output(__a , __a , __a ) def lowerCamelCase_ ( self : List[Any] , __a : int , __a : Tuple , __a : str=False ): '''simple docstring''' lowerCamelCase__: List[str] = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowerCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowerCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__: List[Any] = TFEfficientFormerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase_ ( self : str ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__: str = True lowerCamelCase__: Tuple = getattr(self.model_tester , """seq_length""" , __a ) lowerCamelCase__: Tuple = getattr(self.model_tester , """encoder_seq_length""" , __a ) lowerCamelCase__: Optional[Any] = getattr(self.model_tester , """key_length""" , __a ) lowerCamelCase__: Tuple = getattr(self.model_tester , """chunk_length""" , __a ) if chunk_length is not None and hasattr(self.model_tester
, """num_hashes""" ): lowerCamelCase__: Tuple = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: lowerCamelCase__: List[str] = True lowerCamelCase__: Dict = False lowerCamelCase__: str = True lowerCamelCase__: int = model_class(__a ) lowerCamelCase__: Any = model(**self._prepare_for_class(__a , __a ) , training=__a ) lowerCamelCase__: Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase__: Optional[Any] = True lowerCamelCase__: str = model_class(__a ) lowerCamelCase__: Optional[Any] = model(**self._prepare_for_class(__a , __a ) , training=__a ) lowerCamelCase__: Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCamelCase_ ( self : int ): '''simple docstring''' lowerCamelCase__ , lowerCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model lowerCamelCase__: List[str] = model_class(__a ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes lowerCamelCase__: str = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a ) for key, val in model.input_signature.items() if key in model.dummy_inputs } lowerCamelCase__: Optional[int] = model(__a ) self.assertTrue(outputs_dict is not None ) def __lowerCAmelCase ( ) -> Any: '''simple docstring''' lowerCamelCase__: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCamelCase__ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return ( EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowerCamelCase__: Any = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" ) lowerCamelCase__: str = self.default_image_processor lowerCamelCase__: List[Any] = prepare_img() lowerCamelCase__: List[str] = image_processor(images=__a , return_tensors="""tf""" ) # forward pass lowerCamelCase__: int = model(**__a , training=__a ) # verify the logits lowerCamelCase__: Union[str, Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase__: str = tf.constant([-0.0_555, 0.4_825, -0.0_852] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' lowerCamelCase__: Tuple = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( """snap-research/efficientformer-l1-300""" ) lowerCamelCase__: Union[str, Any] = self.default_image_processor lowerCamelCase__: List[Any] = prepare_img() lowerCamelCase__: Any = 
image_processor(images=__a , return_tensors="""tf""" ) # forward pass lowerCamelCase__: Union[str, Any] = model(**__a , training=__a ) # verify the logits lowerCamelCase__: Any = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase__: Optional[int] = tf.constant([-0.1_312, 0.4_353, -1.0_499] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
242
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
241
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
241
1
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
701
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    # True when every element of the list occurs exactly once.
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
243
0
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
580
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
580
1
# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
443
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets UpperCAmelCase_ : int = datasets.logging.get_logger(__name__) UpperCAmelCase_ : Dict = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' UpperCAmelCase_ : Tuple = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' UpperCAmelCase_ : Optional[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : Any , __A : List[Any]=False , __A : Tuple=False , __A : Any=True , __A : Any=False , __A : Any="dummy_doc" ) -> List[str]: """simple docstring""" a_ : List[str] = {doc: key_lines} a_ : Optional[Any] = {doc: sys_lines} a_ : List[Any] = {} a_ : Tuple = 0 a_ : List[str] = 0 a_ : Union[str, Any] = 0 a_ : List[Any] = 0 a_ : List[str] = 0 a_ : Union[str, Any] = 0 a_ , a_ : List[Any] = reader.get_doc_mentions(__A , key_doc_lines[doc] , __A ) key_singletons_num += singletons_num if NP_only or min_span: a_ : Union[str, Any] = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) a_ , a_ : Union[str, Any] = reader.get_doc_mentions(__A , sys_doc_lines[doc] , __A ) sys_singletons_num += singletons_num if NP_only or min_span: a_ : Dict = reader.set_annotated_parse_trees(__A , key_doc_lines[doc] , __A , __A ) if remove_nested: a_ , a_ : Optional[Any] = reader.remove_nested_coref_mentions(__A , __A ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters a_ , a_ : Optional[Any] = reader.remove_nested_coref_mentions(__A , __A ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters a_ : int = reader.get_mention_assignments(__A , __A ) a_ : List[Any] = reader.get_mention_assignments(__A , __A ) a_ : List[Any] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" ) logger.info( 'Number of resulting singleton clusters in the key ' F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" ) if not keep_singletons: logger.info( F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """ 'files, respectively' ) return doc_coref_infos def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Optional[int] , __A : Optional[Any] , __A : Optional[int] , __A : Tuple , __A : Dict , __A : Optional[int] ) -> List[Any]: """simple docstring""" a_ : int = get_coref_infos(__A , __A , 
__A , __A , __A , __A ) a_ : List[Any] = {} a_ : int = 0 a_ : Optional[int] = 0 for name, metric in metrics: a_ , a_ , a_ : Tuple = evaluator.evaluate_documents(__A , __A , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} ) logger.info( name.ljust(10 ) , F"""Recall: {recall * 1_00:.2f}""" , F""" Precision: {precision * 1_00:.2f}""" , F""" F1: {fa * 1_00:.2f}""" , ) if conll_subparts_num == 3: a_ : List[str] = (conll / 3) * 1_00 logger.info(F"""CoNLL score: {conll:.2f}""" ) output_scores.update({'conll_score': conll} ) return output_scores def SCREAMING_SNAKE_CASE_ ( __A : int ) -> Dict: """simple docstring""" a_ : List[Any] = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: a_ : List[Any] = line.split()[5] if not parse_col == "-": a_ : List[Any] = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Sequence(datasets.Value('string' ) ), } ) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ] , ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : int=False ) -> Tuple: a_ : List[str] = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: a_ : Union[str, Any] = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE__ ) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" a_ : List[Any] = evaluate( key_lines=SCREAMING_SNAKE_CASE__ , sys_lines=SCREAMING_SNAKE_CASE__ , metrics=SCREAMING_SNAKE_CASE__ , NP_only=SCREAMING_SNAKE_CASE__ , remove_nested=SCREAMING_SNAKE_CASE__ , keep_singletons=SCREAMING_SNAKE_CASE__ , min_span=SCREAMING_SNAKE_CASE__ , ) return score
443
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
119
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
119
1
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
575
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[::-1]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
575
1
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): _a = (UnCLIPScheduler,) def A__ ( self , **lowerCAmelCase ) -> Optional[int]: '''simple docstring''' _lowercase ={ 'num_train_timesteps': 1_000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**SCREAMING_SNAKE_CASE__ ) return config def A__ ( self ) -> List[str]: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> str: '''simple docstring''' for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> Union[str, Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> Tuple: '''simple docstring''' for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> Optional[int]: '''simple docstring''' for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> Dict: '''simple docstring''' for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , prev_timestep=SCREAMING_SNAKE_CASE__ ) def A__ ( self ) -> Tuple: '''simple docstring''' _lowercase =self.scheduler_classes[0] _lowercase =self.get_scheduler_config(variance_type='fixed_small_log' ) _lowercase =scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5 def A__ ( self ) -> str: '''simple docstring''' _lowercase =self.scheduler_classes[0] _lowercase =self.get_scheduler_config(variance_type='learned_range' ) _lowercase =scheduler_class(**SCREAMING_SNAKE_CASE__ ) _lowercase =0.5 assert scheduler._get_variance(1 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -10.1712790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -5.7998052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=SCREAMING_SNAKE_CASE__ ) - -0.0010011 < 1e-5 def A__ ( self ) -> int: '''simple docstring''' _lowercase =self.scheduler_classes[0] _lowercase =self.get_scheduler_config() _lowercase =scheduler_class(**SCREAMING_SNAKE_CASE__ ) _lowercase =scheduler.timesteps _lowercase =self.dummy_model() _lowercase =self.dummy_sample_deter _lowercase =torch.manual_seed(0 ) for i, t in enumerate(SCREAMING_SNAKE_CASE__ ): # 1. predict noise residual _lowercase =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # 2. 
predict previous mean of sample x_t-1 _lowercase =scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample _lowercase =pred_prev_sample _lowercase =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) _lowercase =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3284743 ) < 1e-3 def A__ ( self ) -> Optional[Any]: '''simple docstring''' _lowercase =self.scheduler_classes[0] _lowercase =self.get_scheduler_config() _lowercase =scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(25 ) _lowercase =scheduler.timesteps _lowercase =self.dummy_model() _lowercase =self.dummy_sample_deter _lowercase =torch.manual_seed(0 ) for i, t in enumerate(SCREAMING_SNAKE_CASE__ ): # 1. predict noise residual _lowercase =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if i + 1 == timesteps.shape[0]: _lowercase =None else: _lowercase =timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowercase =scheduler.step( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , prev_timestep=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample _lowercase =pred_prev_sample _lowercase =torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) _lowercase =torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3362038 ) < 1e-3 def A__ ( self ) -> Dict: '''simple docstring''' pass def A__ ( self ) -> Dict: '''simple docstring''' pass
291
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _a : def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : str=64 , SCREAMING_SNAKE_CASE__ : Any=5 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : int=64 , SCREAMING_SNAKE_CASE__ : List[str]="gelu" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Any=5_12 , SCREAMING_SNAKE_CASE__ : Any=16 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : str=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices lowerCamelCase__ = scope def _UpperCamelCase ( self : Dict ): return MPNetConfig.from_pretrained('microsoft/mpnet-base' ) def _UpperCamelCase ( self : Optional[int] ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self : Optional[Any] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self : Tuple , 
SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ): lowerCamelCase__ = MPNetModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCamelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ): lowerCamelCase__ = MPNetForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowerCamelCase__ = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = MPNetForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): lowerCamelCase__ = self.num_choices lowerCamelCase__ = MPNetForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = MPNetForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self : List[str] ): lowerCamelCase__ = self.prepare_config_and_inputs() ((lowerCamelCase__) 
, (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__) , (lowerCamelCase__)) = config_and_inputs lowerCamelCase__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _a ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): a_ : Any = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) a_ : Optional[Any] = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) a_ : Optional[Any] = False a_ : Any = True def _UpperCamelCase ( self : str ): lowerCamelCase__ = MPNetModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def _UpperCamelCase ( self : Tuple ): self.config_tester.run_common_tests() def _UpperCamelCase ( self : int ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self : Dict ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self : int ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self : Union[str, Any] ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self : Optional[int] ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @require_torch class _a ( unittest.TestCase ): @slow def _UpperCamelCase ( self : Optional[int] ): lowerCamelCase__ = MPNetModel.from_pretrained('microsoft/mpnet-base' ) lowerCamelCase__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) lowerCamelCase__ = model(SCREAMING_SNAKE_CASE__ )[0] lowerCamelCase__ = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ = torch.tensor( [[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm _A = logging.get_logger(__name__) @dataclass class UpperCAmelCase__ ( _snake_case ): """simple docstring""" A : int = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__(self , **_a ) -> Optional[int]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowercase_ : Dict = deprecated_arg[3:] setattr(self , _a , not kwargs.pop(_a ) ) logger.warning( f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or''' f''' {positive_arg}={kwargs[positive_arg]}''' ) lowercase_ : List[Any] = kwargs.pop('torchscript' , self.torchscript ) lowercase_ : Tuple = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics ) lowercase_ : Any = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level ) super().__init__(**_a ) A : bool = field(default=_snake_case , metadata={'''help''': '''Trace the models using torchscript'''} ) A : bool = field(default=_snake_case , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} ) A : str = field( default='''O1''' , metadata={ '''help''': ( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. ''' '''See details at https://nvidia.github.io/apex/amp.html''' ) } , ) @cached_property def _lowerCamelCase (self ) -> Tuple["torch.device", int]: requires_backends(self , ['torch'] ) logger.info('PyTorch: setting up devices' ) if not self.cuda: lowercase_ : List[str] = torch.device('cpu' ) lowercase_ : Optional[int] = 0 elif is_torch_tpu_available(): lowercase_ : Dict = xm.xla_device() lowercase_ : Tuple = 0 else: lowercase_ : Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) lowercase_ : Tuple = torch.cuda.device_count() return device, n_gpu @property def _lowerCamelCase (self ) -> str: return is_torch_tpu_available() and self.tpu @property def _lowerCamelCase (self ) -> int: requires_backends(self , ['torch'] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def _lowerCamelCase (self ) -> "torch.device": requires_backends(self , ['torch'] ) return self._setup_devices[0] @property def _lowerCamelCase (self ) -> int: requires_backends(self , ['torch'] ) return self._setup_devices[1] @property def _lowerCamelCase (self ) -> Optional[int]: return self.n_gpu > 0
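The constructor above folds the deprecated no_* flags into their positive counterparts. The same mapping in isolation, as a standalone sketch with hypothetical flag names:

def resolve_deprecated_args(kwargs, deprecated_args=("no_inference", "no_cuda")):
    # "no_cuda=True" becomes "cuda=False": strip the "no_" prefix
    # (deprecated_arg[3:]) and negate the popped value, as the dataclass does.
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

print(resolve_deprecated_args({"no_cuda": True}))  # {'cuda': False}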
'''simple docstring''' import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() _A = logging.get_logger(__name__) def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ): lowercase_ : List[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: lowercase_ : List[Any] = 128 elif "12-12" in model_name: lowercase_ : Tuple = 12 lowercase_ : List[Any] = 12 elif "14-14" in model_name: lowercase_ : List[str] = 14 lowercase_ : Optional[Any] = 14 elif "16-16" in model_name: lowercase_ : Union[str, Any] = 16 lowercase_ : List[str] = 16 else: raise ValueError('Model not supported' ) lowercase_ : Optional[Any] = 'huggingface/label-files' if "speech-commands" in model_name: lowercase_ : List[str] = 35 lowercase_ : int = 'speech-commands-v2-id2label.json' else: lowercase_ : Union[str, Any] = 527 lowercase_ : int = 'audioset-id2label.json' lowercase_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) ) lowercase_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} lowercase_ : Optional[int] = idalabel lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()} return config def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ): if "module.v" in name: lowercase_ : Dict = name.replace('module.v' , 'audio_spectrogram_transformer' ) if "cls_token" in name: lowercase_ : Optional[Any] = name.replace('cls_token' , 'embeddings.cls_token' ) if "dist_token" in name: lowercase_ : Any = name.replace('dist_token' , 'embeddings.distillation_token' ) if "pos_embed" in name: lowercase_ : List[str] = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: lowercase_ : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) # transformer blocks if "blocks" in name: lowercase_ : Optional[Any] = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: lowercase_ : Optional[int] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowercase_ : Dict = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowercase_ : int = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowercase_ : Optional[int] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowercase_ : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowercase_ : int = name.replace('mlp.fc2' , 'output.dense' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: lowercase_ : int = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' ) # classifier head if "module.mlp_head.0" in name: lowercase_ : Dict = name.replace('module.mlp_head.0' , 'classifier.layernorm' ) if "module.mlp_head.1" in name: lowercase_ : List[Any] = name.replace('module.mlp_head.1' , 'classifier.dense' ) return name def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): for key in orig_state_dict.copy().keys(): lowercase_ : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: lowercase_ : List[str] = key.split('.' 
) lowercase_ : int = int(key_split[3] ) lowercase_ : Tuple = config.hidden_size if "weight" in key: lowercase_ : Tuple = val[:dim, :] lowercase_ : Union[str, Any] = val[dim : dim * 2, :] lowercase_ : Optional[int] = val[-dim:, :] else: lowercase_ : Optional[Any] = val[:dim] lowercase_ : Any = val[dim : dim * 2] lowercase_ : Tuple = val[-dim:] else: lowercase_ : Optional[Any] = val return orig_state_dict def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ): lowercase_ : List[Any] = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): lowercase_ : Dict = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ ) lowercase_ : Optional[int] = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict lowercase_ : Dict = model_name_to_url[model_name] lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' ) # remove some keys remove_keys(SCREAMING_SNAKE_CASE_ ) # rename some keys lowercase_ : str = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load 🤗 model lowercase_ : Optional[Any] = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 lowercase_ : Tuple = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978 lowercase_ : str = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526 lowercase_ : str = 1_024 if 'speech-commands' not in model_name else 128 lowercase_ : Dict = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) if "speech-commands" in model_name: lowercase_ : Optional[Any] = load_dataset('speech_commands' , 'v0.02' , split='validation' ) lowercase_ : Any = dataset[0]['audio']['array'] else: lowercase_ : Any = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , ) lowercase_ ,lowercase_ : Union[str, Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ ) lowercase_ : str = waveform.squeeze().numpy() lowercase_ : str = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=16_000 , return_tensors='pt' ) # forward pass lowercase_ : Tuple = model(**SCREAMING_SNAKE_CASE_ ) lowercase_ : Tuple = outputs.logits if 
model_name == "ast-finetuned-audioset-10-10-0.4593": lowercase_ : int = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": lowercase_ : Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": lowercase_ : Optional[Any] = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": lowercase_ : List[str] = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": lowercase_ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": lowercase_ : Any = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": lowercase_ : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": lowercase_ : Optional[Any] = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('Unknown model name' ) if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ): raise ValueError('Logits don\'t match' ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(f'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print('Pushing model and feature extractor to the hub...' ) model.push_to_hub(f'''MIT/{model_name}''' ) feature_extractor.push_to_hub(f'''MIT/{model_name}''' ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _A = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() snake_case__ : int = logging.get_logger(__name__) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ) ->List[str]: _UpperCAmelCase =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase =[(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->str: for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase ="" else: _UpperCAmelCase ="vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase =in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase =in_proj_bias[: config.hidden_size] _UpperCAmelCase =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase =in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase =in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( _lowerCamelCase ) ->Any: _UpperCAmelCase =["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->int: _UpperCAmelCase =dct.pop(_lowerCamelCase ) _UpperCAmelCase =val def lowerCamelCase__ ( ) ->Optional[Any]: _UpperCAmelCase ="http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase =Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True ) ->Optional[int]: _UpperCAmelCase =ViTConfig() # patch_size if model_name[-1] == "8": _UpperCAmelCase =8 # set labels if required if not base_model: _UpperCAmelCase =1000 _UpperCAmelCase ="huggingface/label-files" _UpperCAmelCase ="imagenet-1k-id2label.json" _UpperCAmelCase =json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase ={int(_lowerCamelCase ): v for k, v in idalabel.items()} _UpperCAmelCase =idalabel _UpperCAmelCase ={v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _UpperCAmelCase =384 _UpperCAmelCase =1536 _UpperCAmelCase =12 _UpperCAmelCase =6 # load original model from torch hub _UpperCAmelCase =torch.hub.load("facebookresearch/dino:main" , _lowerCamelCase ) original_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase =original_model.state_dict() if base_model: remove_classification_head_(_lowerCamelCase ) _UpperCAmelCase =create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # load HuggingFace model if base_model: _UpperCAmelCase =ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval() else: _UpperCAmelCase =ViTForImageClassification(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor _UpperCAmelCase =ViTImageProcessor() _UpperCAmelCase =image_processor(images=prepare_img() , return_tensors="pt" ) _UpperCAmelCase =encoding["pixel_values"] _UpperCAmelCase =model(_lowerCamelCase ) if base_model: _UpperCAmelCase =original_model(_lowerCamelCase ) assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: _UpperCAmelCase =original_model(_lowerCamelCase ) assert logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) print(F"Saving image 
processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": snake_case__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) snake_case__ : Optional[int] = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
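read_in_q_k_v above splits timm's fused attention projection into the separate query/key/value matrices the HF implementation expects. The slicing pattern on its own (hidden size shrunk for illustration):

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
# the three slices tile the fused matrix exactly
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)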
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ : Optional[Any] = logging.get_logger(__name__) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ) ->int: _UpperCAmelCase =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "deit.embeddings.cls_token"), ("dist_token", "deit.embeddings.distillation_token"), ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "deit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" _UpperCAmelCase =[(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("norm.weight", "deit.layernorm.weight"), ("norm.bias", "deit.layernorm.bias"), ("head.weight", "cls_classifier.weight"), ("head.bias", "cls_classifier.bias"), ("head_dist.weight", "distillation_classifier.weight"), ("head_dist.bias", "distillation_classifier.bias"), ] ) return rename_keys def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) ->List[Any]: for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase ="" else: _UpperCAmelCase ="deit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) _UpperCAmelCase =state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase =in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase =in_proj_bias[: config.hidden_size] _UpperCAmelCase =in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase =in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase =in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->Dict: _UpperCAmelCase =dct.pop(_lowerCamelCase ) _UpperCAmelCase =val def lowerCamelCase__ ( ) ->int: _UpperCAmelCase ="http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase =Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->List[str]: _UpperCAmelCase =DeiTConfig() # all deit models have fine-tuned heads _UpperCAmelCase =False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size _UpperCAmelCase =1000 _UpperCAmelCase ="huggingface/label-files" _UpperCAmelCase ="imagenet-1k-id2label.json" _UpperCAmelCase =json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase ={int(_lowerCamelCase ): v for k, v in idalabel.items()} _UpperCAmelCase =idalabel _UpperCAmelCase ={v: k for k, v in idalabel.items()} _UpperCAmelCase =int(deit_name[-6:-4] ) _UpperCAmelCase =int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("tiny" ): _UpperCAmelCase =192 _UpperCAmelCase =768 _UpperCAmelCase =12 _UpperCAmelCase =3 elif deit_name[9:].startswith("small" ): _UpperCAmelCase =384 _UpperCAmelCase =1536 _UpperCAmelCase =12 _UpperCAmelCase =6 if deit_name[9:].startswith("base" ): pass elif deit_name[4:].startswith("large" ): _UpperCAmelCase =1024 _UpperCAmelCase =4096 _UpperCAmelCase =24 _UpperCAmelCase =16 # load original model from timm _UpperCAmelCase =timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase =timm_model.state_dict() _UpperCAmelCase =create_rename_keys(_lowerCamelCase , _lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # load HuggingFace model _UpperCAmelCase =DeiTForImageClassificationWithTeacher(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) # Check outputs on an image, prepared by DeiTImageProcessor _UpperCAmelCase =int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 _UpperCAmelCase =DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size ) _UpperCAmelCase =image_processor(images=prepare_img() , return_tensors="pt" ) _UpperCAmelCase =encoding["pixel_values"] _UpperCAmelCase =model(_lowerCamelCase ) _UpperCAmelCase =timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--deit_name', default='vit_deit_base_distilled_patch16_224', type=str, help='Name of the DeiT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) snake_case__ : List[str] = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
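The preprocessing above keeps the usual 256/224 resize-then-center-crop ratio from the DeiT eval pipeline; the size arithmetic by itself:

image_size = 224
resize_size = int((256 / 224) * image_size)  # shorter edge resized to this
assert resize_size == 256  # a center crop back to image_size (224) follows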
from __future__ import annotations from typing import Any class lowerCAmelCase_ : """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 ) -> None: __UpperCamelCase , __UpperCamelCase = row, column __UpperCamelCase = [[default_value for c in range(_SCREAMING_SNAKE_CASE )] for r in range(_SCREAMING_SNAKE_CASE )] def __str__( self ) -> str: __UpperCamelCase = f"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier __UpperCamelCase = 0 for row_vector in self.array: for obj in row_vector: __UpperCamelCase = max(_SCREAMING_SNAKE_CASE , len(str(_SCREAMING_SNAKE_CASE ) ) ) __UpperCamelCase = f"""%{max_element_length}s""" # Make string and return def single_line(_SCREAMING_SNAKE_CASE ) -> str: nonlocal string_format_identifier __UpperCamelCase = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_SCREAMING_SNAKE_CASE ) for row_vector in self.array ) return s def __repr__( self ) -> str: return str(self ) def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> bool: if not (isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and len(_SCREAMING_SNAKE_CASE ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any: assert self.validate_indicies(_SCREAMING_SNAKE_CASE ) return self.array[loc[0]][loc[1]] def __setitem__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: assert self.validate_indicies(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = value def __add__( self , _SCREAMING_SNAKE_CASE ) -> Matrix: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert self.row == another.row and self.column == another.column # Add __UpperCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCamelCase = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: __UpperCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCamelCase = -self[r, c] return result def __sub__( self , _SCREAMING_SNAKE_CASE ) -> Matrix: return self + (-another) def __mul__( self , _SCREAMING_SNAKE_CASE ) -> Matrix: if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ): # Scalar multiplication __UpperCamelCase = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __UpperCamelCase = self[r, c] * another return result elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Matrix multiplication assert self.column == another.row __UpperCamelCase = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __UpperCamelCase = f"""Unsupported type given for another ({type(_SCREAMING_SNAKE_CASE )})""" raise TypeError(_SCREAMING_SNAKE_CASE ) def __lowercase( self ) -> Matrix: __UpperCamelCase = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __UpperCamelCase = self[r, c] return result def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # 
u, v should be column vector # Calculate __UpperCamelCase = v.transpose() __UpperCamelCase = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _a ( ) -> None: """simple docstring""" __UpperCamelCase = Matrix(3 , 3 , 0 ) for i in range(3 ): __UpperCamelCase = 1 print(F"""a^(-1) is {ainv}""" ) # u, v __UpperCamelCase = Matrix(3 , 1 , 0 ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1, 2, -3 __UpperCamelCase = Matrix(3 , 1 , 0 ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}""" ) def _a ( ) -> None: """simple docstring""" import doctest doctest.testmod() testa()
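sherman_morrison above applies the rank-one update identity (A + u v^T)^-1 = A^-1 - (A^-1 u v^T A^-1) / (1 + v^T A^-1 u), with self playing the role of A^-1. A quick numpy sanity check of the identity itself:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(3, 3)) + 3 * np.eye(3)  # well conditioned, invertible
u = rng.normal(size=(3, 1))
v = rng.normal(size=(3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u)[0, 0]
updated_inv = a_inv - (a_inv @ u) @ (v.T @ a_inv) / denom
assert np.allclose(updated_inv, np.linalg.inv(a + u @ v.T))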
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel _snake_case = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48_000, 'sample_size': 65_536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48_000, 'sample_size': 65_536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48_000, 'sample_size': 131_072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, } def _a ( __lowercase , __lowercase ) -> Dict: """simple docstring""" return torch.atana(__lowercase , __lowercase ) / math.pi * 2 def _a ( __lowercase ) -> str: """simple docstring""" __UpperCamelCase = torch.sin(t * math.pi / 2 ) ** 2 __UpperCamelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(__lowercase , __lowercase ) class lowerCAmelCase_ ( _lowercase ): """simple docstring""" pass class lowerCAmelCase_ ( nn.Module ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE ) -> Dict: super().__init__() __UpperCamelCase = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4 ) __UpperCamelCase = deepcopy(self.diffusion ) __UpperCamelCase = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE ) def _a ( __lowercase ) -> List[Any]: """simple docstring""" __UpperCamelCase = MODELS_MAP[model_name]['url'] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" _snake_case = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } _snake_case = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } _snake_case = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } _snake_case = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } _snake_case = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } _snake_case = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def _a ( __lowercase ) -> Optional[int]: """simple docstring""" if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' 
): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def _a ( __lowercase ) -> List[Any]: """simple docstring""" for key, value in ATTN_MAP.items(): if name.startswith(__lowercase ) and not isinstance(__lowercase , __lowercase ): return name.replace(__lowercase , __lowercase ) elif name.startswith(__lowercase ): return [name.replace(__lowercase , __lowercase ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def _a ( __lowercase , __lowercase=13 ) -> Union[str, Any]: """simple docstring""" __UpperCamelCase = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) __UpperCamelCase = 0 if string.startswith('net.3.' ): depth += 1 __UpperCamelCase = string[6:] elif string.startswith('net.' ): __UpperCamelCase = string[4:] while string.startswith('main.7.' ): depth += 1 __UpperCamelCase = string[7:] if string.startswith('main.' ): __UpperCamelCase = string[5:] # mid block if string[:2].isdigit(): __UpperCamelCase = string[:2] __UpperCamelCase = string[2:] else: __UpperCamelCase = string[0] __UpperCamelCase = string[1:] if depth == max_depth: __UpperCamelCase = MID_NUM_TO_LAYER[layer_num] __UpperCamelCase = 'mid_block' elif depth > 0 and int(__lowercase ) < 7: __UpperCamelCase = DOWN_NUM_TO_LAYER[layer_num] __UpperCamelCase = F"""down_blocks.{depth}""" elif depth > 0 and int(__lowercase ) > 7: __UpperCamelCase = UP_NUM_TO_LAYER[layer_num] __UpperCamelCase = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: __UpperCamelCase = DEPTH_0_TO_LAYER[layer_num] __UpperCamelCase = F"""up_blocks.{max_depth - 1}""" if int(__lowercase ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) __UpperCamelCase = string_left[1:] if "resnets" in new_layer: __UpperCamelCase = convert_resconv_naming(__lowercase ) elif "attentions" in new_layer: __UpperCamelCase = convert_attn_naming(__lowercase ) __UpperCamelCase = new_string_left if not isinstance(__lowercase , __lowercase ): __UpperCamelCase = prefix + '.' + new_layer + '.' + string_left else: __UpperCamelCase = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def _a ( __lowercase ) -> int: """simple docstring""" __UpperCamelCase = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue __UpperCamelCase = rename(__lowercase ) # check if we need to transform from Conv => Linear for attention if isinstance(__lowercase , __lowercase ): __UpperCamelCase = transform_conv_attns(__lowercase , __lowercase , __lowercase ) else: __UpperCamelCase = v return new_state_dict def _a ( __lowercase , __lowercase , __lowercase ) -> List[str]: """simple docstring""" if len(__lowercase ) == 1: if len(v.shape ) == 3: # weight __UpperCamelCase = v[:, :, 0] else: # bias __UpperCamelCase = v else: # qkv matrices __UpperCamelCase = v.shape[0] __UpperCamelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __UpperCamelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __UpperCamelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def _a ( __lowercase ) -> Dict: """simple docstring""" __UpperCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) __UpperCamelCase = args.model_path.split('/' )[-1].split('.' 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" __UpperCamelCase = download(__lowercase ) __UpperCamelCase = MODELS_MAP[model_name]['sample_rate'] __UpperCamelCase = MODELS_MAP[model_name]['sample_size'] __UpperCamelCase = Object() __UpperCamelCase = sample_size __UpperCamelCase = sample_rate __UpperCamelCase = 0 __UpperCamelCase = UNetaDModel(sample_size=__lowercase , sample_rate=__lowercase ) __UpperCamelCase = diffusers_model.state_dict() __UpperCamelCase = DiffusionUncond(__lowercase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=__lowercase )['state_dict'] ) __UpperCamelCase = orig_model.diffusion_ema.eval() __UpperCamelCase = orig_model.state_dict() __UpperCamelCase = rename_orig_weights(__lowercase ) __UpperCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __UpperCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(__lowercase ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(__lowercase ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": __UpperCamelCase = value.squeeze() __UpperCamelCase = value diffusers_model.load_state_dict(__lowercase ) __UpperCamelCase = 100 __UpperCamelCase = 33 __UpperCamelCase = IPNDMScheduler(num_train_timesteps=__lowercase ) __UpperCamelCase = torch.manual_seed(__lowercase ) __UpperCamelCase = torch.randn([1, 2, config.sample_size] , generator=__lowercase ).to(__lowercase ) __UpperCamelCase = torch.linspace(1 , 0 , steps + 1 , device=__lowercase )[:-1] __UpperCamelCase = get_crash_schedule(__lowercase ) __UpperCamelCase = DanceDiffusionPipeline(unet=__lowercase , scheduler=__lowercase ) __UpperCamelCase = torch.manual_seed(33 ) __UpperCamelCase = pipe(num_inference_steps=__lowercase , generator=__lowercase ).audios __UpperCamelCase = sampling.iplms_sample(__lowercase , __lowercase , __lowercase , {} ) __UpperCamelCase = generated.clamp(-1 , 1 ) __UpperCamelCase = (generated - audio).abs().sum() __UpperCamelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , __lowercase ) print('Diff max' , __lowercase ) assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') _snake_case = parser.parse_args() main(args)
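The crash schedule used above reparametrizes a linear t grid through sigma/alpha; the same math in scalar form (the torch version in the script is element-wise but otherwise identical, assuming the obfuscated torch.atana stands for torch.atan2):

import math

def alpha_sigma_to_t(alpha: float, sigma: float) -> float:
    return math.atan2(sigma, alpha) / math.pi * 2

def get_crash_schedule(t: float) -> float:
    sigma = math.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)

for t in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(round(get_crash_schedule(t), 4))  # increases monotonically from 0 to 1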
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image __a = ['text', 'image', 'audio'] def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : int = [] for input_type in input_types: if input_type == "text": inputs.append('''Text input''' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(_lowercase , _lowercase ): inputs.append(create_inputs(_lowercase ) ) else: raise ValueError(f'''Invalid type requested: {input_type}''' ) return inputs def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : Dict = [] for output in outputs: if isinstance(_lowercase , (str, AgentText) ): output_types.append('''text''' ) elif isinstance(_lowercase , (Image.Image, AgentImage) ): output_types.append('''image''' ) elif isinstance(_lowercase , (torch.Tensor, AgentAudio) ): output_types.append('''audio''' ) else: raise ValueError(f'''Invalid output: {output}''' ) return output_types @is_tool_test class __a: """simple docstring""" def a__ ( self ) -> Union[str, Any]: self.assertTrue(hasattr(self.tool ,'''inputs''' ) ) self.assertTrue(hasattr(self.tool ,'''outputs''' ) ) UpperCAmelCase_ : str = self.tool.inputs for _input in inputs: if isinstance(_input ,_SCREAMING_SNAKE_CASE ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) UpperCAmelCase_ : Union[str, Any] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def a__ ( self ) -> Any: UpperCAmelCase_ : Union[str, Any] = create_inputs(self.tool.inputs ) UpperCAmelCase_ : Any = self.tool(*_SCREAMING_SNAKE_CASE ) # There is a single output if len(self.tool.outputs ) == 1: UpperCAmelCase_ : Optional[int] = [outputs] self.assertListEqual(output_types(_SCREAMING_SNAKE_CASE ) ,self.tool.outputs ) def a__ ( self ) -> Optional[int]: self.assertTrue(hasattr(self.tool ,'''description''' ) ) self.assertTrue(hasattr(self.tool ,'''default_checkpoint''' ) ) self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs ) UpperCAmelCase_ : str = self.tool(*_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Union[str, Any] = [outputs] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(self.tool.outputs ) ) for output, output_type in zip(_SCREAMING_SNAKE_CASE ,self.tool.outputs ): UpperCAmelCase_ : Union[str, Any] = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ) def a__ ( self ) -> Any: UpperCAmelCase_ : Union[str, Any] = create_inputs(self.tool.inputs ) UpperCAmelCase_ : Tuple = [] for _input, input_type in zip(_SCREAMING_SNAKE_CASE ,self.tool.inputs ): if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error UpperCAmelCase_ : 
List[Any] = self.tool(*_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Optional[Any] = [outputs] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(self.tool.outputs ) )
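For context, a class exercised by this mixin only needs the attributes the assertions probe; a hypothetical minimal tool (the name and behavior are invented for illustration, and the suite also expects description and default_checkpoint attributes, as asserted above):

from transformers import Tool

class UpperCaseTool(Tool):  # hypothetical example, not a shipped tool
    name = "upper_case"
    description = "This is a tool that upper-cases a piece of text."
    inputs = ["text"]
    outputs = ["text"]
    default_checkpoint = None  # the mixin only checks that the attribute exists

    def __call__(self, text):
        return text.upper()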
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCamelCase = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } __lowerCamelCase = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } __lowerCamelCase = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = VOCAB_FILES_NAMES UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ = SqueezeBertTokenizer def __init__( self : Tuple , __snake_case : str=None , __snake_case : Optional[Any]=None , __snake_case : str=True , __snake_case : str="[UNK]" , __snake_case : int="[SEP]" , __snake_case : int="[PAD]" , __snake_case : Any="[CLS]" , __snake_case : List[Any]="[MASK]" , __snake_case : Union[str, Any]=True , __snake_case : Dict=None , **__snake_case : str , ) -> List[Any]: super().__init__( __snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , tokenize_chinese_chars=__snake_case , strip_accents=__snake_case , **__snake_case , ) __magic_name__: List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , __snake_case ) != do_lower_case or normalizer_state.get("""strip_accents""" , __snake_case ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , __snake_case ) != tokenize_chinese_chars ): __magic_name__: Dict = getattr(__snake_case , normalizer_state.pop("""type""" ) ) __magic_name__: Optional[Any] = do_lower_case __magic_name__: Any = strip_accents __magic_name__: Any = tokenize_chinese_chars __magic_name__: Union[str, Any] = normalizer_class(**__snake_case ) __magic_name__: Optional[Any] = do_lower_case def lowerCamelCase__ ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : int=None ) -> Optional[int]: __magic_name__: int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__ ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]: 
__magic_name__: Dict = [self.sep_token_id] __magic_name__: List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]: __magic_name__: Any = self._tokenizer.model.save(__snake_case , name=__snake_case ) return tuple(__snake_case )
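create_token_type_ids_from_sequences above builds the standard BERT-style segment mask; spelled out with placeholder ids:

cls, sep = [101], [102]  # placeholder special-token ids
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]

token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]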
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) __lowerCAmelCase : Dict = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str , snake_case__ : bool , snake_case__ : str = None , snake_case__ : list = None ): lowerCAmelCase__ = None lowerCAmelCase__ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) ) lowerCAmelCase__ = os.path.abspath("""examples""" ) for item in os.listdir(snake_case__ ): if item not in EXCLUDE_EXAMPLES: lowerCAmelCase__ = os.path.join(snake_case__ , snake_case__ ) if os.path.isfile(snake_case__ ) and ".py" in item_path: with self.subTest( tested_script=snake_case__ , feature_script=snake_case__ , tested_section="""main()""" if parser_only else """training_function()""" , ): lowerCAmelCase__ = compare_against_test( os.path.join(snake_case__ , snake_case__ ) , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase__ = """\n""".join(snake_case__ ) if special_strings is not None: for string in special_strings: lowerCAmelCase__ = diff.replace(snake_case__ , """""" ) self.assertEqual(snake_case__ , """""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): self.one_complete_example("""complete_nlp_example.py""" , snake_case__ ) self.one_complete_example("""complete_nlp_example.py""" , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) ) lowerCAmelCase__ = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""" , snake_case__ , snake_case__ , snake_case__ ) self.one_complete_example("""complete_cv_example.py""" , snake_case__ , snake_case__ , snake_case__ ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : Optional[Any] = False @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ): super().setUpClass() lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = os.path.join(cls._tmpdir , """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) lowerCAmelCase__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir 
{self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() lowerCAmelCase__ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ ) self.assertNotIn("""epoch 0:""" , snake_case__ ) self.assertIn("""epoch 1:""" , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ ) if torch.cuda.is_available(): lowerCAmelCase__ = torch.cuda.device_count() else: lowerCAmelCase__ = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""" , snake_case__ ) self.assertIn("""epoch 1:""" , snake_case__ ) else: self.assertIn("""epoch 0:""" , snake_case__ ) self.assertIn("""epoch 1:""" , snake_case__ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ): lowerCAmelCase__ = run_command(self._launch_args + testargs , return_stdout=snake_case__ ) lowerCAmelCase__ = re.findall("""({.+})""" , snake_case__ ) lowerCAmelCase__ = [r for r in results if """accuracy""" in r][-1] lowerCAmelCase__ = ast.literal_eval(snake_case__ ) self.assertGreaterEqual(results["""accuracy"""] , 0.75 ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdir: lowerCAmelCase__ = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(snake_case__ , """tracking""" ) ) ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args + testargs ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
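The launcher assembled in setUpClass amounts to running accelerate launch as a subprocess; roughly, and with an illustrative output directory:

import subprocess
from accelerate.utils import write_basic_config

write_basic_config(save_location="default_config.yml")
launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
testargs = (
    "examples/by_feature/checkpointing.py "
    "--checkpointing_steps epoch --output_dir /tmp/ckpts"
).split()
subprocess.run(launch_args + testargs, check=True)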
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase__ ( _lowerCAmelCase ): def __UpperCamelCase ( self : Optional[int] ) -> Any: """simple docstring""" lowerCamelCase_ : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(UpperCamelCase_ , '''num_attention_heads''' ) ) class lowerCAmelCase__ : def __init__( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Tuple=32 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Optional[int]=640 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Optional[int]="silu" , UpperCamelCase_ : Any=3 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Dict=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : int=10 , UpperCamelCase_ : List[str]=None , ) -> Optional[int]: """simple docstring""" lowerCamelCase_ : List[str] = parent lowerCamelCase_ : List[Any] = batch_size lowerCamelCase_ : Any = image_size lowerCamelCase_ : List[Any] = patch_size lowerCamelCase_ : Union[str, Any] = num_channels lowerCamelCase_ : Optional[Any] = last_hidden_size lowerCamelCase_ : str = num_attention_heads lowerCamelCase_ : Union[str, Any] = hidden_act lowerCamelCase_ : List[str] = conv_kernel_size lowerCamelCase_ : List[Any] = output_stride lowerCamelCase_ : Optional[Any] = hidden_dropout_prob lowerCamelCase_ : Tuple = attention_probs_dropout_prob lowerCamelCase_ : int = classifier_dropout_prob lowerCamelCase_ : List[str] = use_labels lowerCamelCase_ : int = is_training lowerCamelCase_ : Any = num_labels lowerCamelCase_ : Tuple = initializer_range lowerCamelCase_ : Optional[Any] = scope def __UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" lowerCamelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ : Dict = None lowerCamelCase_ : Tuple = None if self.use_labels: lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase_ : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __UpperCamelCase ( self : int ) -> int: """simple docstring""" return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , 
output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict ) -> Optional[int]: """simple docstring""" lowerCamelCase_ : Union[str, Any] = MobileViTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCamelCase_ : Tuple = model(UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __UpperCamelCase ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ) -> Tuple: """simple docstring""" lowerCamelCase_ : Optional[int] = self.num_labels lowerCamelCase_ : Any = MobileViTForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCamelCase_ : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_ : Tuple = self.num_labels lowerCamelCase_ : List[Any] = MobileViTForSemanticSegmentation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCamelCase_ : Union[str, Any] = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCamelCase_ : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ : Any = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Tuple = config_and_inputs lowerCamelCase_ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ): A = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A = False A = False A = False A = False def __UpperCamelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ : Union[str, Any] = MobileViTModelTester(self ) lowerCamelCase_ : List[Any] = MobileViTConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def __UpperCamelCase ( self : str ) -> List[str]: """simple docstring""" pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def 
__UpperCamelCase ( self : Optional[Any] ) -> int: """simple docstring""" pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def __UpperCamelCase ( self : List[str] ) -> Any: """simple docstring""" pass def __UpperCamelCase ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Dict = model_class(UpperCamelCase_ ) lowerCamelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : Tuple = [*signature.parameters.keys()] lowerCamelCase_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __UpperCamelCase ( self : Any ) -> List[Any]: """simple docstring""" pass def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __UpperCamelCase ( self : Any ) -> str: """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ): lowerCamelCase_ : List[str] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): lowerCamelCase_ : Dict = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCamelCase_ : Optional[Any] = outputs.hidden_states lowerCamelCase_ : Optional[Any] = 5 self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
lowerCamelCase_ : Any = 2 for i in range(len(UpperCamelCase_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowerCamelCase_ , lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Union[str, Any] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ : int = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __UpperCamelCase ( self : Any ) -> int: """simple docstring""" lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def __UpperCamelCase ( self : str ) -> str: """simple docstring""" lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) @slow def __UpperCamelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : str = MobileViTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def __snake_case (): """simple docstring""" lowerCamelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : List[Any] ) -> int: """simple docstring""" return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def __UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ : Optional[int] = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(UpperCamelCase_ ) lowerCamelCase_ : int = self.default_image_processor lowerCamelCase_ : Optional[int] = prepare_img() lowerCamelCase_ : Optional[int] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCamelCase_ : Tuple = model(**UpperCamelCase_ ) # verify the logits lowerCamelCase_ : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowerCamelCase_ : int = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) ) @slow def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ : List[str] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCamelCase_ : Optional[int] = model.to(UpperCamelCase_ ) lowerCamelCase_ : List[Any] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCamelCase_ : str = prepare_img() lowerCamelCase_ : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCamelCase_ : Optional[Any] = model(**UpperCamelCase_ ) lowerCamelCase_ : Optional[int] = outputs.logits # verify the logits lowerCamelCase_ : Tuple = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase_ ) 
lowerCamelCase_ : Union[str, Any] = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 ) ) @slow def __UpperCamelCase ( self : int ) -> Dict: """simple docstring""" lowerCamelCase_ : str = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCamelCase_ : Union[str, Any] = model.to(UpperCamelCase_ ) lowerCamelCase_ : Tuple = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) lowerCamelCase_ : Dict = prepare_img() lowerCamelCase_ : Optional[Any] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCamelCase_ : int = model(**UpperCamelCase_ ) lowerCamelCase_ : Any = outputs.logits.detach().cpu() lowerCamelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)] ) lowerCamelCase_ : int = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ ) lowerCamelCase_ : str = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ ) lowerCamelCase_ : Optional[Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
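# Added usage sketch, mirroring the checkpoint these tests exercise; the
# pipeline call is the public-API equivalent of the slow classification test.
from transformers import pipeline

classifier = pipeline("image-classification", model="apple/mobilevit-xx-small")
predictions = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(predictions[0])  # top label and score for the fixture image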
501
'''simple docstring''' import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __snake_case (__UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : int = tmp_path / '''cache''' lowerCamelCase_ : str = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase_ : Tuple = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : Tuple = tmp_path / '''cache''' lowerCamelCase_ : int = {'''text''': '''string'''} lowerCamelCase_ : Optional[Any] = features.copy() if features else default_expected_features lowerCamelCase_ : Dict = ( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase_ : Optional[int] = TextDatasetReader(__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : Union[str, Any] = tmp_path / '''cache''' lowerCamelCase_ : Any = {'''text''': '''string'''} lowerCamelCase_ : Optional[Any] = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , split=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" if issubclass(__UpperCAmelCase , __UpperCAmelCase ): lowerCamelCase_ : List[Any] = text_path elif issubclass(__UpperCAmelCase , __UpperCAmelCase ): lowerCamelCase_ : Optional[int] = [text_path] lowerCamelCase_ : Any = tmp_path / '''cache''' lowerCamelCase_ : Dict = {'''text''': '''string'''} lowerCamelCase_ : Dict = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_dataset(__UpperCAmelCase , __UpperCAmelCase ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ): """simple docstring""" assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) for split in splits: lowerCamelCase_ : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype 
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : Tuple = tmp_path / '''cache''' lowerCamelCase_ : Optional[Any] = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCamelCase_ : Union[str, Any] = TextDatasetReader({'''train''': text_path} , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : List[str] = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCamelCase_ : Optional[Any] = {'''text''': '''string'''} lowerCamelCase_ : List[Any] = features.copy() if features else default_expected_features lowerCamelCase_ : List[str] = ( Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCamelCase_ : str = TextDatasetReader({'''train''': text_path} , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" if split: lowerCamelCase_ : Optional[Any] = {split: text_path} else: lowerCamelCase_ : List[str] = '''train''' lowerCamelCase_ : int = {'''train''': text_path, '''test''': text_path} lowerCamelCase_ : Union[str, Any] = tmp_path / '''cache''' lowerCamelCase_ : List[str] = {'''text''': '''string'''} lowerCamelCase_ : str = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read() _check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
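# Added sketch: the single-file and dict-of-splits behaviour these
# TextDatasetReader tests cover, reached through the public load_dataset API.
# "train.txt" is a placeholder path.
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "train.txt"})
assert ds["train"].column_names == ["text"]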
501
1
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: the least row length n for which the fill-count
    function F(min_block_length, n) first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
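# Added sanity checks using the reference values from the Project Euler 115
# statement: the fill-count function first exceeds one million at n = 30 for
# a minimum block length of 3, and at n = 57 for 10.
assert solution(3) == 30
assert solution(10) == 57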
715
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths in O(v^3) time and O(v^2) space."""
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2

# generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]

# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2

# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1

# Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
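# Added non-interactive usage sketch (the graph values are illustrative):
# three vertices forming the directed cycle 0 -> 1 -> 2 -> 0.
INF = float("inf")
example_graph = [
    [0.0, 3.0, INF],
    [INF, 0.0, 4.0],
    [1.0, INF, 0.0],
]
dist, _ = floyd_warshall(example_graph, 3)
assert dist[0][2] == 7.0  # relayed through vertex 1
assert dist[1][0] == 5.0  # relayed through vertex 2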
532
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
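# Added usage sketch, assuming a transformers build that exports this class
# at the top level:
from transformers import MraConfig

config = MraConfig(num_hidden_layers=6, block_per_row=8)
assert config.model_type == "mra" and config.block_per_row == 8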
131
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __UpperCamelCase : int = 'pt' elif is_tf_available(): __UpperCamelCase : int = 'tf' else: __UpperCamelCase : List[Any] = 'jax' class _UpperCamelCase ( A,unittest.TestCase ): '''simple docstring''' a_ : str = PerceiverTokenizer a_ : int = False def _snake_case ( self : Tuple ): '''simple docstring''' super().setUp() __lowerCamelCase : str = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _snake_case ( self : Any ): '''simple docstring''' return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" ) def _snake_case ( self : Optional[int] , **_lowerCamelCase : Dict ): '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _snake_case ( self : int , _lowerCamelCase : int , _lowerCamelCase : List[Any]=False , _lowerCamelCase : int=2_0 , _lowerCamelCase : Optional[int]=5 ): '''simple docstring''' __lowerCamelCase : str = [] for i in range(len(_lowerCamelCase ) ): try: __lowerCamelCase : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCamelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __lowerCamelCase : Optional[Any] = list(filter(lambda _lowerCamelCase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , _lowerCamelCase ) ) __lowerCamelCase : Any = list(filter(lambda _lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCamelCase ) , _lowerCamelCase ) ) if max_length is not None and len(_lowerCamelCase ) > max_length: __lowerCamelCase : Union[str, Any] = toks[:max_length] if min_length is not None and len(_lowerCamelCase ) < min_length and len(_lowerCamelCase ) > 0: while len(_lowerCamelCase ) < min_length: __lowerCamelCase : List[str] = toks + toks # toks_str = [t[1] for t in toks] __lowerCamelCase : Optional[int] = [t[0] for t in toks] # Ensure consistency __lowerCamelCase : Union[str, Any] = tokenizer.decode(_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) if " " not in output_txt and len(_lowerCamelCase ) > 1: __lowerCamelCase : Optional[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCamelCase ) + """ """ + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCamelCase ) ) if with_prefix_space: __lowerCamelCase : List[str] = """ """ + output_txt __lowerCamelCase : Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) return output_txt, output_ids def _snake_case ( self : List[Any] ): '''simple docstring''' __lowerCamelCase : List[str] = self.perceiver_tokenizer __lowerCamelCase : Union[str, Any] = """Unicode €.""" __lowerCamelCase : str = tokenizer(_lowerCamelCase ) __lowerCamelCase : Optional[int] = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded["""input_ids"""] , _lowerCamelCase ) # decoding __lowerCamelCase : Optional[int] = tokenizer.decode(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , """[CLS]Unicode €.[SEP]""" ) __lowerCamelCase : Dict = tokenizer("""e è é ê ë""" ) __lowerCamelCase : Optional[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded["""input_ids"""] , 
_lowerCamelCase ) # decoding __lowerCamelCase : List[str] = tokenizer.decode(_lowerCamelCase ) self.assertEqual(_lowerCamelCase , """[CLS]e è é ê ë[SEP]""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.perceiver_tokenizer __lowerCamelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] # fmt: off __lowerCamelCase : int = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on __lowerCamelCase : List[Any] = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) if FRAMEWORK != "jax": __lowerCamelCase : Tuple = list(batch.input_ids.numpy()[0] ) else: __lowerCamelCase : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : Dict = self.perceiver_tokenizer __lowerCamelCase : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __lowerCamelCase : Any = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , _lowerCamelCase ) self.assertIn("""attention_mask""" , _lowerCamelCase ) self.assertNotIn("""decoder_input_ids""" , _lowerCamelCase ) self.assertNotIn("""decoder_attention_mask""" , _lowerCamelCase ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowerCamelCase : List[Any] = self.perceiver_tokenizer __lowerCamelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] __lowerCamelCase : Union[str, Any] = tokenizer( text_target=_lowerCamelCase , max_length=3_2 , padding="""max_length""" , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase ) self.assertEqual(3_2 , targets["""input_ids"""].shape[1] ) def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test __lowerCamelCase : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __lowerCamelCase : Tuple = tempfile.mkdtemp() __lowerCamelCase : Any = """ He is very happy, UNwant\u00E9d,running""" __lowerCamelCase : Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) __lowerCamelCase : str = tokenizer.__class__.from_pretrained(_lowerCamelCase ) __lowerCamelCase : Dict = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) shutil.rmtree(_lowerCamelCase ) __lowerCamelCase : Optional[Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in 
tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __lowerCamelCase : Union[str, Any] = tempfile.mkdtemp() __lowerCamelCase : int = """ He is very happy, UNwant\u00E9d,running""" tokenizer.add_tokens(["""bim""", """bambam"""] ) __lowerCamelCase : Dict = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) __lowerCamelCase : Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) tokenizer.save_pretrained(_lowerCamelCase ) __lowerCamelCase : int = tokenizer.__class__.from_pretrained(_lowerCamelCase ) __lowerCamelCase : Any = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) __lowerCamelCase : Any = tokenizer.__class__.from_pretrained(_lowerCamelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCamelCase ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCamelCase ) with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: __lowerCamelCase : str = json.load(_lowerCamelCase ) with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: __lowerCamelCase : Dict = json.load(_lowerCamelCase ) __lowerCamelCase : Optional[Any] = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] __lowerCamelCase : Optional[int] = added_tokens_extra_ids + [ """an_additional_special_token""" ] __lowerCamelCase : List[Any] = added_tokens_extra_ids + [ """an_additional_special_token""" ] with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(_lowerCamelCase , _lowerCamelCase ) with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(_lowerCamelCase , _lowerCamelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __lowerCamelCase : List[str] = tokenizer_class.from_pretrained( _lowerCamelCase , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __lowerCamelCase : Tuple = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=_lowerCamelCase )] __lowerCamelCase : str = tokenizer_class.from_pretrained( _lowerCamelCase , additional_special_tokens=_lowerCamelCase , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def _snake_case ( self : List[str] ): '''simple docstring''' __lowerCamelCase : List[str] = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , """�""" ) def _snake_case ( self : Dict ): '''simple docstring''' pass def _snake_case ( self : Optional[Any] ): '''simple docstring''' pass def _snake_case ( self : List[Any] ): '''simple docstring''' pass def _snake_case ( self : List[str] ): '''simple docstring''' pass def _snake_case ( self : int ): '''simple docstring''' __lowerCamelCase : int = self.get_tokenizers(fast=_lowerCamelCase , do_lower_case=_lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __lowerCamelCase : Optional[int] = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""] __lowerCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_string(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
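# Added round-trip sketch, taken directly from the behaviour the tests above
# assert on:
from transformers import PerceiverTokenizer

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
ids = tokenizer("Unicode €.")["input_ids"]
assert tokenizer.decode(ids) == "[CLS]Unicode €.[SEP]"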
519
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class lowerCAmelCase__ : def __init__( self : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple=13 , _lowerCamelCase : List[str]=7 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : str=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : int=True , _lowerCamelCase : Any=99 , _lowerCamelCase : Optional[int]=32 , _lowerCamelCase : Dict=2 , _lowerCamelCase : Union[str, Any]=4 , _lowerCamelCase : List[Any]=37 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : str=0.1 , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : Tuple=512 , _lowerCamelCase : str=16 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Optional[Any]=0.0_2 , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Any=True , _lowerCamelCase : Any="None" , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Union[str, Any]=None , ): _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_input_mask _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = relative_attention _snake_case = position_biased_input _snake_case = pos_att_type _snake_case = scope def lowercase ( self : str ): _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_input_mask: _snake_case = random_attention_mask([self.batch_size, self.seq_length] ) _snake_case = None if self.use_token_type_ids: _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_lowerCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, 
choice_labels def lowercase ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] ): _snake_case = TFDebertaVaModel(config=_lowerCamelCase ) _snake_case = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _snake_case = [input_ids, input_mask] _snake_case = model(_lowerCamelCase ) _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] ): _snake_case = TFDebertaVaForMaskedLM(config=_lowerCamelCase ) _snake_case = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase ( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ): _snake_case = self.num_labels _snake_case = TFDebertaVaForSequenceClassification(config=_lowerCamelCase ) _snake_case = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase ( self : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Any ): _snake_case = self.num_labels _snake_case = TFDebertaVaForTokenClassification(config=_lowerCamelCase ) _snake_case = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] ): _snake_case = TFDebertaVaForQuestionAnswering(config=_lowerCamelCase ) _snake_case = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } _snake_case = model(_lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase ( self : List[Any] ): _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ): __a = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, 
TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": TFDebertaVaModel, """fill-mask""": TFDebertaVaForMaskedLM, """question-answering""": TFDebertaVaForQuestionAnswering, """text-classification""": TFDebertaVaForSequenceClassification, """token-classification""": TFDebertaVaForTokenClassification, """zero-shot""": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowercase ( self : int ): _snake_case = TFDebertaVaModelTester(self ) _snake_case = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 ) def lowercase ( self : List[str] ): self.config_tester.run_common_tests() def lowercase ( self : Optional[int] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def lowercase ( self : int ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase ) def lowercase ( self : List[str] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase ) def lowercase ( self : List[Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase ) def lowercase ( self : Union[str, Any] ): _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase ) @slow def lowercase ( self : str ): _snake_case = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_lowerCamelCase ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def lowercase ( self : Optional[int] ): pass @slow def lowercase ( self : Any ): _snake_case = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) _snake_case = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _snake_case = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0] _snake_case = tf.constant( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1e-4 )
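# Added usage sketch of the slow-test path above. The file's `TFDebertaVa...`
# spellings appear to be mangled `TFDebertaV2...` names; the public class is
# used here, with the same checkpoint the slow test loads.
import tensorflow as tf
from transformers import TFDebertaV2Model

model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31414, 232, 328, 2]])
outputs = model(input_ids, attention_mask=tf.ones_like(input_ids))
print(outputs.last_hidden_state.shape)  # (1, 5, hidden_size)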
430
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _UpperCAmelCase ( __lowerCamelCase : str ) -> None: _snake_case , _snake_case = analyze_text(__lowerCamelCase ) _snake_case = list(''' ''' + ascii_lowercase ) # what is our total sum of probabilities. _snake_case = sum(single_char_strings.values() ) # one length string _snake_case = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: _snake_case = single_char_strings[ch] _snake_case = my_str / all_sum my_fir_sum += prob * math.loga(__lowerCamelCase ) # entropy formula. # print entropy print(f'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string _snake_case = sum(two_char_strings.values() ) _snake_case = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: _snake_case = cha + cha if sequence in two_char_strings: _snake_case = two_char_strings[sequence] _snake_case = int(__lowerCamelCase ) / all_sum my_sec_sum += prob * math.loga(__lowerCamelCase ) # print second entropy print(f'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def _UpperCAmelCase ( __lowerCamelCase : str ) -> tuple[dict, dict]: _snake_case = Counter() # type: ignore _snake_case = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__lowerCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _UpperCAmelCase ( ) -> Union[str, Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
430
1
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__) UpperCAmelCase_ : Optional[int] = {'facebook/bart-base': BartForConditionalGeneration} UpperCAmelCase_ : List[str] = {'facebook/bart-base': BartTokenizer} def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" a_ : int = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=__A , default=__A , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=__A , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=__A , default=__A , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=__A , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__A , ) parser.add_argument( '--config_name' , type=__A , default=__A , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=__A , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=__A , default=__A , help='Where to store the final ONNX file.' ) a_ : Optional[int] = parser.parse_args() return args def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Union[str, Any]="cpu" ) -> List[Any]: """simple docstring""" a_ : Any = model_dict[model_name].from_pretrained(__A ).to(__A ) a_ : Optional[Any] = tokenizer_dict[model_name].from_pretrained(__A ) if model_name in ["facebook/bart-base"]: a_ : Union[str, Any] = 0 a_ : str = None a_ : Optional[Any] = 0 return huggingface_model, tokenizer def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Union[str, Any] , __A : str , __A : Union[str, Any] , __A : Dict ) -> Tuple: """simple docstring""" model.eval() a_ : List[str] = None a_ : str = torch.jit.script(BARTBeamSearchGenerator(__A ) ) with torch.no_grad(): a_ : List[Any] = 'My friends are cool but they eat too many carbs.' 
a_ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=10_24 , return_tensors='pt' ).to(model.device ) a_ : Optional[Any] = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=__A , max_length=__A , early_stopping=__A , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( __A , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , __A , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=__A , ) logger.info('Model exported to {}'.format(__A ) ) a_ : str = remove_dup_initializers(os.path.abspath(__A ) ) logger.info('Deduplicated and optimized model written to {}'.format(__A ) ) a_ : List[str] = onnxruntime.InferenceSession(__A ) a_ : Union[str, Any] = ort_sess.run( __A , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(__A ), 'max_length': np.array(__A ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def SCREAMING_SNAKE_CASE_ ( ) -> Dict: """simple docstring""" a_ : Optional[int] = parse_args() a_ : Tuple = 5 a_ : Any = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() a_ : Union[str, Any] = torch.device(args.device ) a_ , a_ : Any = load_model_tokenizer(args.model_name_or_path , __A ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(__A ) if args.max_length: a_ : Dict = args.max_length if args.num_beams: a_ : Dict = args.num_beams if args.output_file_path: a_ : List[str] = args.output_file_path else: a_ : Any = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(__A , __A , __A , __A , __A ) if __name__ == "__main__": main()
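# Added follow-on sketch: running the exported graph with onnxruntime. The
# feed names mirror the input_names passed to torch.onnx.export above;
# "BART.onnx" is the script's default output path, and 2 is bart-base's
# decoder_start_token_id. Whether the graph accepts these exact feeds depends
# on the export succeeding as written.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
session = onnxruntime.InferenceSession("BART.onnx")
encoded = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
(output_ids,) = session.run(
    None,
    {
        "input_ids": encoded["input_ids"],
        "attention_mask": encoded["attention_mask"],
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),
    },
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))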
570
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
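# Added behaviour sketch: the placeholder imports cleanly, but any use raises
# an informative error from requires_backends when the onnx backend is
# missing.
try:
    OnnxRuntimeModel()
except ImportError as err:
    print(err)  # tells the user which backend to install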
570
1
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: the last `digits` digits of the hyperexponentiation
    (tetration) of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
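# Added check: the recursive helper agrees with Python's built-in three
# argument pow() on a few values.
for b, e, m in [(3, 5, 7), (2, 100, 997), (1777, 1855, 10**8)]:
    assert _modexpt(b, e, m) == pow(b, e, m)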
719
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
298
0
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total

    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
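# Added verification sketch: the factors multiply back to the input.
a = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(a)
assert np.allclose(lower @ upper, a)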
262
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: the maximum sum over all contiguous subarrays."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
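# Added sanity checks: the classic Kadane example evaluates to 6, and an
# all-negative array only returns 0 when empty subarrays are allowed.
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-3, -2, -5]) == -2
assert max_subarray_sum([-3, -2, -5], allow_empty_subarrays=True) == 0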
262
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random channel-first numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
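# A minimal usage sketch of the processor exercised above (not part of the test
# suite; "bert-base-uncased" is an assumed public checkpoint, and the image is
# synthetic rather than a real photo):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=ViTImageProcessor())
#   image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#   # -> dict with input_ids, token_type_ids, attention_mask and pixel_values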
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n``, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the smallest factor of the current n; it is necessarily prime.
        while n % i != 0:
            i += 1
        ans = i
        # Divide that factor out of n completely before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
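# A quick sanity check, using the example from the Project Euler problem 3
# statement: the prime factors of 13195 are 5, 7, 13 and 29, so the largest is 29.
#
#   >>> solution(13195)
#   29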