Dataset columns (viewer summary):
  text          string (lengths 31 to 243k)
  type          string (1 distinct value)
  start         int64  (36 to 275k)
  end           int64  (286 to 280k)
  depth         int64  (0 to 1)
  filepath      string (lengths 85 to 188)
  parent_class  string (3 distinct values)
  class_index   int64  (0 to 10.8k)
class UnivNetKernelPredictorResidualBlock(nn.Module):
    """
    Implementation of the residual block for the kernel predictor network inside each location variable convolution
    block (LVCBlock).

    Parameters:
        config: (`UnivNetConfig`):
            Config for the `UnivNetModel` model.
    """

    def __init__(
        self,
        config: UnivNetConfig,
    ):
        super().__init__()
        self.channels = config.model_in_channels
        self.kernel_size = config.kernel_predictor_conv_size
        self.dropout_prob = config.kernel_predictor_dropout
        self.leaky_relu_slope = config.leaky_relu_slope

        padding = (self.kernel_size - 1) // 2

        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)
        self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)

    def forward(self, hidden_states: torch.FloatTensor):
        # hidden_states should have shape (batch_size, channels, seq_length)
        residual = hidden_states
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv1(hidden_states)
        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
        hidden_states = self.conv2(hidden_states)
        hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
        return hidden_states + residual

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        weight_norm(self.conv1)
        weight_norm(self.conv2)

    def remove_weight_norm(self):
        nn.utils.remove_weight_norm(self.conv1)
        nn.utils.remove_weight_norm(self.conv2)
type: class_definition
start: 1,832
end: 3,712
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/univnet/modeling_univnet.py
parent_class: null
class_index: 4,500
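A minimal shape sketch for this residual block, assuming the default `UnivNetConfig` and importing the class from the internal modeling module (it is not part of the public API): input and output share the `(batch_size, channels, seq_length)` shape.

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetKernelPredictorResidualBlock

config = UnivNetConfig()
block = UnivNetKernelPredictorResidualBlock(config)

# Dropout, two same-padded convolutions and a skip connection preserve the shape.
hidden_states = torch.randn(1, config.model_in_channels, 100)
output = block(hidden_states)
assert output.shape == hidden_states.shape
```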
class UnivNetKernelPredictor(nn.Module): """ Implementation of the kernel predictor network which supplies the kernel and bias for the location variable convolutional layers (LVCs) in each UnivNet LVCBlock. Based on the KernelPredictor implementation in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). conv_layers (`int`, *optional*, defaults to 4): The number of location variable convolutional layers to output kernels and biases for. """ def __init__( self, config: UnivNetConfig, conv_kernel_size: int = 3, conv_layers: int = 4, ): super().__init__() self.conv_in_channels = config.model_hidden_channels self.conv_out_channels = 2 * config.model_hidden_channels self.conv_kernel_size = conv_kernel_size self.conv_layers = conv_layers self.kernel_channels = ( self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers ) self.bias_channels = self.conv_out_channels * self.conv_layers self.resnet_in_channels = config.num_mel_bins self.resnet_hidden_channels = config.kernel_predictor_hidden_channels self.resnet_kernel_size = config.kernel_predictor_conv_size self.num_blocks = config.kernel_predictor_num_blocks self.leaky_relu_slope = config.leaky_relu_slope padding = (self.resnet_kernel_size - 1) // 2 self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True) self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)]) self.kernel_conv = nn.Conv1d( self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True ) self.bias_conv = nn.Conv1d( self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True ) def forward(self, spectrogram: torch.FloatTensor): """ Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, seq_length). Args: spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): Tensor containing the log-mel spectrograms. Returns: Tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, seq_length)`. 
""" batch_size, _, seq_length = spectrogram.shape hidden_states = self.input_conv(spectrogram) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) for resblock in self.resblocks: hidden_states = resblock(hidden_states) kernel_hidden_states = self.kernel_conv(hidden_states) bias_hidden_states = self.bias_conv(hidden_states) # Reshape kernels and biases to appropriate shape kernels = kernel_hidden_states.view( batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length, ).contiguous() biases = bias_hidden_states.view( batch_size, self.conv_layers, self.conv_out_channels, seq_length, ).contiguous() return kernels, biases def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.input_conv) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.kernel_conv) weight_norm(self.bias_conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.input_conv) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.kernel_conv) nn.utils.remove_weight_norm(self.bias_conv)
type: class_definition
start: 3,715
end: 8,679
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/univnet/modeling_univnet.py
parent_class: null
class_index: 4,501
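A hedged sketch of the predictor's output shapes, again with the default `UnivNetConfig` and the internal modeling module: for each of the `conv_layers` LVC layers it emits kernels of shape `(batch, conv_layers, in_channels, out_channels, kernel_size, seq_length)` and biases of shape `(batch, conv_layers, out_channels, seq_length)`.

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetKernelPredictor

config = UnivNetConfig()
predictor = UnivNetKernelPredictor(config, conv_kernel_size=3, conv_layers=4)

spectrogram = torch.randn(1, config.num_mel_bins, 10)  # (batch, mel bins, frames)
kernels, biases = predictor(spectrogram)

# With the assumed defaults: in_channels=32, out_channels=64.
print(kernels.shape)  # torch.Size([1, 4, 32, 64, 3, 10])
print(biases.shape)   # torch.Size([1, 4, 64, 10])
```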
class UnivNetLvcResidualBlock(nn.Module): """ Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network. Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. kernel_size (`int`): The kernel size for the dilated 1D convolutional layer. dilation (`int`): The dilation for the dilated 1D convolutional layer. """ def __init__( self, config: UnivNetConfig, kernel_size: int, dilation: int, ): super().__init__() self.hidden_channels = config.model_hidden_channels self.kernel_size = kernel_size self.dilation = dilation self.leaky_relu_slope = config.leaky_relu_slope padding = self.dilation * (self.kernel_size - 1) // 2 self.conv = nn.Conv1d( self.hidden_channels, self.hidden_channels, self.kernel_size, padding=padding, dilation=self.dilation, ) def forward(self, hidden_states, kernel, bias, hop_size=256): residual = hidden_states hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.conv(hidden_states) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.location_variable_convolution(hidden_states, kernel, bias, hop_size=hop_size) # Gated activation unit hidden_states = torch.sigmoid(hidden_states[:, : self.hidden_channels, :]) * torch.tanh( hidden_states[:, self.hidden_channels :, :] ) # Skip connection hidden_states = residual + hidden_states return hidden_states # Based on https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L171 def location_variable_convolution( self, hidden_states: torch.FloatTensor, kernel: torch.FloatTensor, bias: torch.FloatTensor, dilation: int = 1, hop_size: int = 256, ): """ Performs location-variable convolution operation on the input sequence (hidden_states) using the local convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform Generation](https://arxiv.org/abs/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao. Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`): The input sequence of shape (batch, in_channels, in_length). kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`): The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length). bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`): The bias for the local convolution of shape (batch, out_channels, kernel_length). dilation (`int`, *optional*, defaults to 1): The dilation of convolution. hop_size (`int`, *optional*, defaults to 256): The hop_size of the conditioning sequence. Returns: `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size, out_channels, in_length). """ batch, _, in_length = hidden_states.shape batch, _, out_channels, kernel_size, kernel_length = kernel.shape if in_length != (kernel_length * hop_size): raise ValueError( f"Dim 2 of `hidden_states` should be {kernel_length * hop_size}) but got {in_length}. Please check" " `hidden_states` or `kernel` and `hop_size` to make sure they are correct." 
) padding = dilation * int((kernel_size - 1) / 2) # (batch, in_channels, in_length + 2*padding) hidden_states = nn.functional.pad(hidden_states, (padding, padding), "constant", 0) # (batch, in_channels, kernel_length, hop_size + 2*padding) hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size) if hop_size < dilation: hidden_states = nn.functional.pad(hidden_states, (0, dilation), "constant", 0) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation) hidden_states = hidden_states.unfold(3, dilation, dilation) hidden_states = hidden_states[:, :, :, :, :hop_size] # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation) hidden_states = hidden_states.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, _, kernel_size) hidden_states = hidden_states.unfold(4, kernel_size, 1) # Apply local convolution kernel to hidden_states. output_hidden_states = torch.einsum("bildsk,biokl->bolsd", hidden_states, kernel) output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d) bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d) output_hidden_states = output_hidden_states + bias output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1) return output_hidden_states def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv)
type: class_definition
start: 8,682
end: 14,544
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/univnet/modeling_univnet.py
parent_class: null
class_index: 4,502
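A hedged sketch of the shape contract enforced by `location_variable_convolution`: the hidden-state length must equal `kernel_length * hop_size`, i.e. one predicted kernel per hop of `hop_size` samples. It assumes the default `UnivNetConfig` and uses random kernels and biases in place of a real kernel predictor.

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetLvcResidualBlock

config = UnivNetConfig()
block = UnivNetLvcResidualBlock(config, kernel_size=3, dilation=1)

hop_size = 256
kernel_length = 4                      # number of conditioning frames
in_length = kernel_length * hop_size   # required hidden-state length

hidden = torch.randn(1, config.model_hidden_channels, in_length)
# kernel: (batch, in_channels, out_channels, kernel_size, kernel_length)
kernel = torch.randn(1, config.model_hidden_channels, 2 * config.model_hidden_channels, 3, kernel_length)
bias = torch.randn(1, 2 * config.model_hidden_channels, kernel_length)

output = block(hidden, kernel, bias, hop_size=hop_size)
# The gated activation halves the doubled channels back to model_hidden_channels.
assert output.shape == hidden.shape
```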
class UnivNetLvcBlock(nn.Module): """ Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers. Based on LVCBlock in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98) Parameters: config (`UnivNetConfig`): Config for the `UnivNetModel` model. layer_id (`int`): An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and `len(config.resblock_stride_sizes) - 1)` inclusive. lvc_hop_size (`int`, *optional*, defaults to 256): The hop size for the location variable convolutional layers. """ def __init__( self, config: UnivNetConfig, layer_id: int, lvc_hop_size: int = 256, ): super().__init__() self.hidden_channels = config.model_hidden_channels self.kernel_size = config.resblock_kernel_sizes[layer_id] self.stride = config.resblock_stride_sizes[layer_id] self.dilations = config.resblock_dilation_sizes[layer_id] self.cond_hop_length = lvc_hop_size self.leaky_relu_slope = config.leaky_relu_slope self.num_blocks = len(self.dilations) self.convt_pre = nn.ConvTranspose1d( self.hidden_channels, self.hidden_channels, 2 * self.stride, stride=self.stride, padding=self.stride // 2 + self.stride % 2, output_padding=self.stride % 2, ) self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks) self.resblocks = nn.ModuleList( [UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)] ) def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor): # hidden_states: (batch_size, hidden_channels, seq_length) # spectrogram: (batch_size, cond_channels, cond_length) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.convt_pre(hidden_states) kernels, biases = self.kernel_predictor(spectrogram) for i, resblock in enumerate(self.resblocks): kernel = kernels[:, i, :, :, :, :] bias = biases[:, i, :, :] hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length) return hidden_states def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.convt_pre) self.kernel_predictor.apply_weight_norm() for layer in self.resblocks: layer.apply_weight_norm() def remove_weight_norm(self): nn.utils.remove_weight_norm(self.convt_pre) self.kernel_predictor.remove_weight_norm() for layer in self.resblocks: layer.remove_weight_norm()
type: class_definition
start: 14,547
end: 17,767
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/univnet/modeling_univnet.py
parent_class: null
class_index: 4,503
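A hedged sketch of what one LVC block does to the sequence length, assuming the default `UnivNetConfig`: the transposed convolution upsamples the hidden states by `stride`, and each conditioning frame then drives `lvc_hop_size` output samples (here `lvc_hop_size` is set to the layer's stride, as `UnivNetModel` does for its first layer).

```python
import torch

from transformers import UnivNetConfig
from transformers.models.univnet.modeling_univnet import UnivNetLvcBlock

config = UnivNetConfig()
stride = config.resblock_stride_sizes[0]  # 8 with the assumed defaults
block = UnivNetLvcBlock(config, layer_id=0, lvc_hop_size=stride)

seq_length = 10
hidden = torch.randn(1, config.model_hidden_channels, seq_length)
spectrogram = torch.randn(1, config.num_mel_bins, seq_length)

output = block(hidden, spectrogram)
print(output.shape)  # torch.Size([1, 32, 80]): seq_length * stride with the assumed defaults
```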
class UnivNetModel(PreTrainedModel): config_class = UnivNetConfig main_input_name = "input_features" def __init__(self, config: UnivNetConfig): super().__init__(config) self.num_kernels = len(config.resblock_kernel_sizes) self.leaky_relu_slope = config.leaky_relu_slope self.conv_pre = nn.Conv1d( config.model_in_channels, config.model_hidden_channels, kernel_size=7, stride=1, padding=3, padding_mode="reflect", ) # Initialize location-variable convolution ResNet Blocks. num_layers = len(config.resblock_stride_sizes) hop_length = 1 hop_lengths = [] for stride in config.resblock_stride_sizes: hop_length = hop_length * stride hop_lengths.append(hop_length) self.resblocks = nn.ModuleList( [ UnivNetLvcBlock( config, layer_id=i, lvc_hop_size=hop_lengths[i], ) for i in range(num_layers) ] ) self.conv_post = nn.Conv1d(config.model_hidden_channels, 1, 7, padding=3, padding_mode="reflect") # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UNIVNET_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=UnivNetModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features: torch.FloatTensor, noise_sequence: Optional[torch.FloatTensor] = None, padding_mask: Optional[torch.FloatTensor] = None, generator: Optional[torch.Generator] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.FloatTensor], UnivNetModelOutput]: r""" Returns: Example: ```python >>> from transformers import UnivNetFeatureExtractor, UnivNetModel >>> from datasets import load_dataset, Audio >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev") >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev") >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> # Resample the audio to the feature extractor's sampling rate. >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate)) >>> inputs = feature_extractor( ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt" ... 
) >>> audio = model(**inputs).waveforms >>> list(audio.shape) [1, 140288] ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Resolve batch sizes for noise_sequence and spectrogram spectrogram_batched = input_features.dim() == 3 if not spectrogram_batched: input_features = input_features.unsqueeze(0) spectrogram_batch_size, spectrogram_length, _ = input_features.shape if noise_sequence is not None: noise_sequence_batched = noise_sequence.dim() == 3 if not noise_sequence_batched: noise_sequence = noise_sequence.unsqueeze(0) else: # Randomly generate noise_sequence noise_sequence_shape = (spectrogram_batch_size, spectrogram_length, self.config.model_in_channels) noise_sequence = torch.randn( noise_sequence_shape, generator=generator, dtype=input_features.dtype, device=input_features.device ) noise_sequence_batch_size = noise_sequence.shape[0] if spectrogram_batch_size > 1 and noise_sequence_batch_size == 1: # Repeat noise_sequence spectrogram_batch_size times noise_sequence = noise_sequence.repeat(spectrogram_batch_size, 1, 1) elif noise_sequence_batch_size > 1 and spectrogram_batch_size == 1: # Repeat spectrogram noise_sequence_batch_size times input_features = input_features.repeat(noise_sequence_batch_size, 1, 1) if noise_sequence_batch_size != spectrogram_batch_size: raise ValueError( f"The batch size of `noise_sequence` is {noise_sequence_batch_size} and the batch size of" f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." ) if padding_mask is not None: if padding_mask.dim() == 1: padding_mask = padding_mask.unsqueeze(0) padding_mask_batch_size = padding_mask.shape[0] if padding_mask_batch_size != spectrogram_batch_size: raise ValueError( f"The batch size of `padding_mask` is {padding_mask_batch_size} and the batch size of" f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal." ) # Change shapes to have channels before sequence lengths hidden_states = noise_sequence.transpose(2, 1) input_features = input_features.transpose(2, 1) hidden_states = self.conv_pre(hidden_states) for resblock in self.resblocks: hidden_states = resblock(hidden_states, input_features) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) hidden_states = self.conv_post(hidden_states) hidden_states = torch.tanh(hidden_states) # Remove sequence length dimension since this collapses to 1 # NOTE: keep waveforms batched even if there's only one waveform = hidden_states.squeeze(1) # Get sequence lengths for UnivNetFeatureExtractor.batch_decode. 
waveform_lengths = None if padding_mask is not None: # Padding is always contiguous and added on the right waveform_lengths = torch.sum(padding_mask, dim=1) if not return_dict: outputs = (waveform, waveform_lengths) return outputs return UnivNetModelOutput( waveforms=waveform, waveform_lengths=waveform_lengths, ) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.conv_pre) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.conv_post) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.conv_pre) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.conv_post)
type: class_definition
start: 20,391
end: 27,542
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/univnet/modeling_univnet.py
parent_class: null
class_index: 4,504
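Beyond the docstring example, a hedged sketch of the reproducibility behaviour visible in `forward`: when `noise_sequence` is not given, noise is drawn with `torch.randn`, so reusing the same seeded `torch.Generator` yields the same waveform. The dummy spectrogram below is an assumption for illustration, not real features.

```python
import torch

from transformers import UnivNetModel

model = UnivNetModel.from_pretrained("dg845/univnet-dev")

# Dummy log-mel input of shape (batch_size, seq_length, num_mel_bins).
input_features = torch.randn(1, 10, model.config.num_mel_bins)

waveform_a = model(input_features, generator=torch.Generator().manual_seed(0)).waveforms
waveform_b = model(input_features, generator=torch.Generator().manual_seed(0)).waveforms
assert torch.allclose(waveform_a, waveform_b)
```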
class BigBirdTokenizer(PreTrainedTokenizer): """ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. 
""" vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = 
kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts)) else: text = "".join(sub_texts) clean_up_tokenization_spaces = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
type: class_definition
start: 1,006
end: 14,216
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/tokenization_big_bird.py
parent_class: null
class_index: 4,505
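A short usage sketch for the slow SentencePiece-based tokenizer; the checkpoint name is the `google/bigbird-roberta-base` repo referenced elsewhere in these files.

```python
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

encoded = tokenizer("Hello world")
# build_inputs_with_special_tokens wraps a single sequence as [CLS] X [SEP].
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))
```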
class BigBirdConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`BigBirdModel`]. It is used to instantiate an BigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BigBird [google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50358): Vocabulary size of the BigBird model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`BigBirdModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimension of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 1024 or 2048 or 4096). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`BigBirdModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. is_decoder (`bool`, *optional*, defaults to `False`): Whether the model is used as a decoder or not. If `False`, the model is used as an encoder. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. attention_type (`str`, *optional*, defaults to `"block_sparse"`) Whether to use block sparse attention (with n complexity) as introduced in paper or original attention layer (with n^2 complexity). Possible values are `"original_full"` and `"block_sparse"`. use_bias (`bool`, *optional*, defaults to `True`) Whether to use bias in query, key, value. rescale_embeddings (`bool`, *optional*, defaults to `False`) Whether to rescale embeddings with (hidden_size ** 0.5). block_size (`int`, *optional*, defaults to 64) Size of each block. Useful only when `attention_type == "block_sparse"`. num_random_blocks (`int`, *optional*, defaults to 3) Each query is going to attend these many number of random blocks. Useful only when `attention_type == "block_sparse"`. 
classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. Example: ```python >>> from transformers import BigBirdConfig, BigBirdModel >>> # Initializing a BigBird google/bigbird-roberta-base style configuration >>> configuration = BigBirdConfig() >>> # Initializing a model (with random weights) from the google/bigbird-roberta-base style configuration >>> model = BigBirdModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "big_bird" def __init__( self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs, ): super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs, ) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.type_vocab_size = type_vocab_size self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.rescale_embeddings = rescale_embeddings self.attention_type = attention_type self.use_bias = use_bias self.block_size = block_size self.num_random_blocks = num_random_blocks self.classifier_dropout = classifier_dropout
type: class_definition
start: 899
end: 7,380
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/configuration_big_bird.py
parent_class: null
class_index: 4,506
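Complementing the docstring example, a hedged sketch of the two attention modes described above: `block_sparse` only pays off for sequences that are long relative to `block_size`, while `original_full` is the quadratic fallback.

```python
from transformers import BigBirdConfig

sparse_config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
full_config = BigBirdConfig(attention_type="original_full")

print(sparse_config.attention_type, sparse_config.block_size)  # block_sparse 64
print(full_config.attention_type)                              # original_full
```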
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
type: class_definition
start: 7,383
end: 7,831
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/configuration_big_bird.py
parent_class: null
class_index: 4,507
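A hedged sketch of reading the dynamic axes off this ONNX config; it assumes the parent `OnnxConfig` constructor signature `(config, task=...)`.

```python
from transformers import BigBirdConfig
from transformers.models.big_bird.configuration_big_bird import BigBirdOnnxConfig

config = BigBirdConfig()
onnx_config = BigBirdOnnxConfig(config, task="default")

print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
```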
class BigBirdTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" BigBird tokenizer (backed by HuggingFace's *tokenizers* library). Based on [Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. """ vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = BigBirdTokenizer model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, ) self.vocab_file = vocab_file @property def can_save_slow_tokenizer(self) -> bool: return os.path.isfile(self.vocab_file) if self.vocab_file else False def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BigBird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return cls + token_ids_0 + sep return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Set to True if the token list is already formatted with special tokens for the model Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0] if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` if token_ids_1 is None, only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of ids. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
""" sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
type: class_definition
start: 1,206
end: 10,165
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/tokenization_big_bird_fast.py
parent_class: null
class_index: 4,508
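A hedged sketch of the pair-building helpers shared with the slow tokenizer: `[CLS] A [SEP] B [SEP]` plus the matching token type ids.

```python
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")

ids_a = tokenizer.encode("first sequence", add_special_tokens=False)
ids_b = tokenizer.encode("second sequence", add_special_tokens=False)

pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)

assert pair[0] == tokenizer.cls_token_id and pair.count(tokenizer.sep_token_id) == 2
assert len(pair) == len(type_ids)
```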
class FlaxBigBirdForPreTrainingOutput(ModelOutput):
    """
    Output type of [`BigBirdForPreTraining`].

    Args:
        prediction_logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`jnp.ndarray` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    prediction_logits: jnp.ndarray = None
    seq_relationship_logits: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
type: class_definition
start: 1,936
end: 3,552
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
parent_class: null
class_index: 4,509
class FlaxBigBirdForQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        pooled_output (`jnp.ndarray` of shape `(batch_size, hidden_size)`):
            pooled_output returned by FlaxBigBirdModel.
        hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    start_logits: jnp.ndarray = None
    end_logits: jnp.ndarray = None
    pooled_output: jnp.ndarray = None
    hidden_states: Optional[Tuple[jnp.ndarray]] = None
    attentions: Optional[Tuple[jnp.ndarray]] = None
type: class_definition
start: 3,578
end: 5,180
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
parent_class: null
class_index: 4,510
class FlaxBigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: BigBirdConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings.setup def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) if self.config.rescale_embeddings: inputs_embeds *= self.config.hidden_size**0.5 # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.LayerNorm(hidden_states) return hidden_states
type: class_definition
start: 9,019
end: 11,035
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
parent_class: null
class_index: 4,511
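A hedged sketch of driving this Flax module standalone (shapes only); the `init`/`apply` calls follow standard Flax linen usage, and the default `deterministic=True` means no dropout RNG is needed.

```python
import jax
import jax.numpy as jnp

from transformers import BigBirdConfig
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdEmbeddings

config = BigBirdConfig()
module = FlaxBigBirdEmbeddings(config)

batch_size, seq_length = 1, 8
input_ids = jnp.ones((batch_size, seq_length), dtype="i4")
token_type_ids = jnp.zeros_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(seq_length)[None, :], (batch_size, seq_length))
attention_mask = jnp.ones_like(input_ids)

variables = module.init(jax.random.PRNGKey(0), input_ids, token_type_ids, position_ids, attention_mask)
hidden = module.apply(variables, input_ids, token_type_ids, position_ids, attention_mask)
print(hidden.shape)  # (1, 8, 768) with the default hidden_size
```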
class FlaxBigBirdSelfAttention(nn.Module): config: BigBirdConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. 
pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs
type: class_definition
start: 11,137
end: 19,034
depth: 0
filepath: /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
parent_class: null
class_index: 4,512
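A hedged sketch of the reshape contract behind `_split_heads` and `_merge_heads`: hidden states go from `(batch, seq, hidden_size)` to `(batch, seq, num_heads, head_dim)` and back, with `head_dim = hidden_size // num_attention_heads`.

```python
import jax.numpy as jnp

batch, seq, hidden_size, num_heads = 1, 8, 768, 12
head_dim = hidden_size // num_heads

states = jnp.zeros((batch, seq, hidden_size))
split = states.reshape(states.shape[:2] + (num_heads, head_dim))  # what _split_heads does
merged = split.reshape(split.shape[:2] + (hidden_size,))          # what _merge_heads does

assert split.shape == (batch, seq, num_heads, head_dim)
assert merged.shape == states.shape
```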
class FlaxBigBirdBlockSparseAttention(nn.Module): config: BigBirdConfig block_sparse_seed: int = None dtype: jnp.dtype = jnp.float32 def setup(self): self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, use_bias=self.config.use_bias, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) @staticmethod def transpose_for_scores(x, n_heads, head_size): new_x_shape = x.shape[:-1] + (n_heads, head_size) x = x.reshape(*new_x_shape) return jnp.transpose(x, axes=(0, 2, 1, 3)) def __call__( self, hidden_states, attention_mask, deterministic=True, output_attentions=False, ): n_heads = self.config.num_attention_heads head_size = self.config.hidden_size // n_heads blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.config.block_size ) query_layer = self.transpose_for_scores(self.query(hidden_states), n_heads, head_size) key_layer = self.transpose_for_scores(self.key(hidden_states), n_heads, head_size) value_layer = self.transpose_for_scores(self.value(hidden_states), n_heads, head_size) indices_prng_key = None if not deterministic: indices_prng_key = self.make_rng("indices") attn_output, attn_weights = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, blocked_encoder_mask, blocked_encoder_mask, n_heads, head_size, indices_prng_key=indices_prng_key, deterministic=deterministic, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs @staticmethod def create_masks_for_block_sparse_attn(attention_mask, block_size: int): batch_size, seq_length = attention_mask.shape if seq_length % block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. 
""" exp_blocked_to_pad = jnp.concatenate( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], axis=2 ) band_mask = jnp.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask = jnp.expand_dims(band_mask, 1) return band_mask blocked_encoder_mask = attention_mask.reshape(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.reshape(batch_size, 1, seq_length, 1) to_mask = attention_mask.reshape(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, head_size, indices_prng_key: Optional[jax.random.PRNGKey] = None, deterministic: Optional[bool] = True, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=None, ): # BigBird block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of # shifting tokens (for calculating sliding attention). hence following code can be divided into 5 parts. bsz, _, from_seq_len, _ = query_layer.shape to_seq_len = key_layer.shape[2] from_block_size = to_block_size = self.config.block_size if from_seq_len % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_len % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") n_rand_blocks = self.config.num_random_blocks rsqrt_d = 1 / jnp.sqrt(head_size) attn_mask_penalty = -10000.0 if from_seq_len in [1024, 3072, 4096]: # old plans used in paper max_seqlen = self.config.max_position_embeddings rand_attn = [ self._bigbird_block_rand_mask( max_seqlen, max_seqlen, from_block_size, to_block_size, n_rand_blocks, indices_prng_key=indices_prng_key, deterministic=deterministic, last_idx=1024, )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, indices_prng_key=indices_prng_key, ) rand_attn = jnp.stack(rand_attn, axis=0) rand_attn = jnp.broadcast_to(rand_attn, (bsz,) + rand_attn.shape) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.reshape(bsz, n_heads, from_seq_len // from_block_size, 
from_block_size, -1) blocked_key_matrix = key_layer.reshape(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.reshape(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) shape = (bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1) gathered_key = self.jax_gather(blocked_key_matrix, rand_attn, batch_dims=2).reshape(*shape) gathered_value = self.jax_gather(blocked_value_matrix, rand_attn, batch_dims=2).reshape(*shape) # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... ) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = jnp.einsum("bhqd,bhkd->bhqk", blocked_query_matrix[:, :, 0], key_layer) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = jax.nn.softmax(first_product, axis=-1) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = jnp.einsum("bhqk,bhkd->bhqd", first_attn_weights, value_layer) first_context_layer = jnp.expand_dims(first_context_layer, 2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = jnp.concatenate( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], axis=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = jnp.concatenate( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], axis=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = jnp.einsum("bhqd,bhkd->bhqk", blocked_query_matrix[:, :, 1], second_key_mat) second_seq_pad = jnp.concatenate( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], jnp.ones([bsz, 1, 1, n_rand_blocks * to_block_size], dtype=to_mask.dtype), ], axis=3, ) second_rand_pad = jnp.concatenate( [ jnp.ones([bsz, n_heads, from_block_size, 4 * to_block_size], dtype=rand_mask.dtype), rand_mask[:, :, 0], ], axis=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - jnp.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = jax.nn.softmax( second_product, axis=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+r)*to_block_size] x [bsz, n_heads, (4+r)*to_block_size, -1] # ==> [bsz, n_heads, from_block_size, -1] second_context_layer = jnp.einsum("bhqk,bhkd->bhqd", second_attn_weights, second_value_mat) second_context_layer = jnp.expand_dims(second_context_layer, 2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = jnp.concatenate( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], axis=3 ) # 
[bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = jnp.concatenate( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], axis=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = jnp.einsum("bhlqd,bhlkd->bhlqk", middle_query_matrix, exp_blocked_key_matrix) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = jnp.einsum("bhlqd,bhlkd->bhlqk", middle_query_matrix, gathered_key[:, :, 1:-1]) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = jnp.einsum("bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]) first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = jnp.einsum("bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]) last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - jnp.expand_dims(to_mask[:, :, :, :to_block_size], 3)) * attn_mask_penalty last_band_product += (1.0 - jnp.expand_dims(to_mask[:, :, :, -to_block_size:], 3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = jnp.concatenate( [first_band_product, inner_band_product, rand_band_product, last_band_product], axis=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = jax.nn.softmax( band_product, axis=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] # x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = jnp.einsum( "bhlqk,bhlkd->bhlqd", attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] # x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += jnp.einsum( "bhlqk,bhlkd->bhlqd", attn_weights[:, :, :, :, 4 * to_block_size : 
-to_block_size], gathered_value[:, :, 1:-1], ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += jnp.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += jnp.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = jnp.concatenate( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], axis=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = jnp.concatenate( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], axis=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = jnp.einsum("bhqd,bhkd->bhqk", blocked_query_matrix[:, :, -2], second_last_key_mat) second_last_seq_pad = jnp.concatenate( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], jnp.ones([bsz, 1, 1, n_rand_blocks * to_block_size], dtype=to_mask.dtype), ], axis=3, ) second_last_rand_pad = jnp.concatenate( [ jnp.ones([bsz, n_heads, from_block_size, 4 * to_block_size], dtype=rand_mask.dtype), rand_mask[:, :, -1], ], axis=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - jnp.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = jax.nn.softmax( second_last_product, axis=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = jnp.einsum("bhqk,bhkd->bhqd", second_last_attn_weights, second_last_value_mat) second_last_context_layer = jnp.expand_dims(second_last_context_layer, 2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = jnp.einsum("bhqd,bhkd->bhqk", blocked_query_matrix[:, :, -1], key_layer) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = jax.nn.softmax(last_product, axis=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = jnp.einsum("bhqk,bhkd->bhqd", last_attn_weights, value_layer) last_context_layer = jnp.expand_dims(last_context_layer, 2) # combining representations of all tokens context_layer = jnp.concatenate( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], axis=2, ) context_layer = context_layer.reshape(bsz, n_heads, from_seq_len, -1) * from_mask context_layer = jnp.transpose(context_layer, axes=(0, 2, 1, 3)).reshape(bsz, from_seq_len, -1) attention_probs = None return context_layer, attention_probs @staticmethod def jax_gather(params, indices, batch_dims=2): """ Gather the indices from params correctly (equivalent to tf.gather but with modifications) Args: params: (bsz, n_heads, num_blocks, block_size, head_dim) indices: (<num_blocks, 1) """ def _jax_gather(params, indices): return params[indices] for _ in range(batch_dims): _jax_gather = jax.vmap(_jax_gather, in_axes=(0, 0)) return _jax_gather(params, indices) # params.shape[:batch_dims] + indices.shape + params.shape[batch_dims+1:] def _create_rand_mask_from_inputs( self, from_blocked_mask, to_blocked_mask, broadcasted_rand_attn, num_attention_heads, num_random_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. broadcasted_rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_random_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = self.jax_gather(to_blocked_mask, broadcasted_rand_attn, batch_dims=1) rand_mask = rand_mask.reshape( batch_size, num_attention_heads, num_windows, num_random_blocks * from_block_size ) rand_mask = jnp.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. 
Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey] = None, deterministic: Optional[bool] = True, last_idx: Optional[int] = -1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. indices_prng_key: jax.random.PRNGKey. PRNG key that is used to perform random jax operations. deterministic: bool. When False random attention will be used. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = jnp.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=jnp.int32) # deterministic nor randomness if deterministic: return rand_attn middle_seq = jnp.arange(1, to_seq_length // to_block_size - 1, dtype=jnp.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: seq_values = jax.random.permutation(indices_prng_key, middle_seq[2:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif i == 2: seq_values = jax.random.permutation(indices_prng_key, middle_seq[3:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif i == from_seq_length // from_block_size - 3: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:last])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) # Missing -4: should have been sliced till last-4 else: if start > last: start = last seq_values = jax.random.permutation(indices_prng_key, middle_seq[:start])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) elif (end + 1) == last: seq_values = jax.random.permutation(indices_prng_key, middle_seq[:start])[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) else: concat_values = 
jnp.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) seq_values = jax.random.permutation(indices_prng_key, concat_values)[:r] rand_attn = rand_attn.at[i - 1].set(seq_values) return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey] = None, deterministic: Optional[bool] = True, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are choosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. indices_prng_key: jax.random.PRNGKey. PRNG key that is used to perform random jax operations. deterministic: bool. When False random attention will be used. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = jnp.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ jnp.zeros((num_blocks, sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=jnp.int32) for i in range(num_heads) ] # deterministic if deterministic: for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): single_block_row_attention = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, 
global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key, ) rand_attn[h] = ( rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): single_block_row_attention = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key, ) rand_attn[h] = ( rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): single_block_row_attention = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, indices_prng_key=indices_prng_key, ) rand_attn[h] = rand_attn[h].at[blk_rw_idx, rnd_r_cnt:curr_r_cnt].set(single_block_row_attention) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, indices_prng_key: Optional[jax.random.PRNGKey] = None, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. indices_prng_key: jax.random.PRNGKey. PRNG key that is used to perform random jax operations window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. 
""" # list of to_blocks from which to choose random attention to_block_list = jnp.arange(to_start_block_id, to_end_block_id, dtype=jnp.int32) # permute the blocks perm_block = jax.random.permutation(indices_prng_key, to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blocks = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blocks.append(perm_block[i]) if len(selected_random_blocks) == num_rand_blocks: break return jnp.array(selected_random_blocks, dtype=jnp.int32)
class_definition
19,037
58,387
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,513
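A minimal usage sketch of the static mask helper defined above; the import path and the toy sizes are assumptions for illustration, not taken from this file.

import jax.numpy as jnp
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdBlockSparseAttention

block_size = 4
seq_length = 8 * block_size  # must be a multiple of block_size, or a ValueError is raised
attention_mask = jnp.ones((2, seq_length), dtype=jnp.float32)

blocked_mask, band_mask, from_mask, to_mask = FlaxBigBirdBlockSparseAttention.create_masks_for_block_sparse_attn(
    attention_mask, block_size
)
# blocked_mask: (2, seq_length // block_size, block_size)
# band_mask:    (2, 1, seq_length // block_size - 4, block_size, 3 * block_size)
# from_mask:    (2, 1, seq_length, 1)
# to_mask:      (2, 1, 1, seq_length)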
class FlaxBigBirdSelfOutput(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states
class_definition
58,486
59,306
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,514
class FlaxBigBirdAttention(nn.Module): config: BigBirdConfig layer_id: int = None causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): if self.config.attention_type == "original_full": self.self = FlaxBigBirdSelfAttention(self.config, causal=self.causal, dtype=self.dtype) elif self.config.attention_type == "block_sparse": self.self = FlaxBigBirdBlockSparseAttention(self.config, block_sparse_seed=self.layer_id, dtype=self.dtype) else: raise ValueError( f"Your `config.attention_type` is {self.config.attention_type} but it can either be `original_full` or" " `block_sparse`" ) self.output = FlaxBigBirdSelfOutput(self.config, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) if self.config.attention_type == "original_full": attn_outputs = self.self( hidden_states, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) else: attn_outputs = self.self( hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs
class_definition
59,309
61,525
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,515
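For orientation only: which of the two attention implementations gets instantiated in setup() above is purely a config choice. The two accepted values are shown below; anything else raises the ValueError in setup().

from transformers import BigBirdConfig

sparse_config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
full_config = BigBirdConfig(attention_type="original_full")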
class FlaxBigBirdIntermediate(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
class_definition
61,626
62,210
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,516
class FlaxBigBirdOutput(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + attention_output)
        return hidden_states
class_definition
62,305
63,129
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,517
class FlaxBigBirdLayer(nn.Module): config: BigBirdConfig layer_id: int = None dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxBigBirdAttention( self.config, layer_id=self.layer_id, causal=self.config.is_decoder, dtype=self.dtype ) self.intermediate = FlaxBigBirdIntermediate(self.config, dtype=self.dtype) self.output = FlaxBigBirdOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxBigBirdAttention(self.config, causal=False, dtype=self.dtype) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer.__call__ with Bert->BigBird def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs
class_definition
63,132
65,465
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,518
class FlaxBigBirdLayerCollection(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxBigBirdCheckpointLayer = remat(FlaxBigBirdLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxBigBirdCheckpointLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxBigBirdLayer(self.config, layer_id=i, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection.__call__ with Bert->BigBird def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, )
class_definition
65,468
68,632
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,519
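Hedged sketch of the gradient-checkpointing branch above. It assumes `remat` resolves to flax.linen.remat (the lifted rematerialization transform); static_argnums (5, 6, 7) are the init_cache, deterministic and output_attentions booleans of FlaxBigBirdLayer.__call__.

import flax.linen as nn
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdLayer

# Activations inside each layer are recomputed in the backward pass instead of stored,
# trading compute for memory; boolean flags must be static so retracing can key on them.
CheckpointedBigBirdLayer = nn.remat(FlaxBigBirdLayer, static_argnums=(5, 6, 7))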
class FlaxBigBirdEncoder(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    gradient_checkpointing: bool = False

    def setup(self):
        self.layer = FlaxBigBirdLayerCollection(
            self.config,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )

    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.layer(
            hidden_states,
            attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            init_cache=init_cache,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
class_definition
68,728
69,974
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,520
class FlaxBigBirdPredictionHeadTransform(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
        self.activation = ACT2FN[self.config.hidden_act]
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return self.LayerNorm(hidden_states)
class_definition
70,086
70,633
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,521
class FlaxBigBirdLMPredictionHead(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros

    def setup(self):
        self.transform = FlaxBigBirdPredictionHeadTransform(self.config, dtype=self.dtype)
        self.decoder = nn.Dense(self.config.vocab_size, dtype=self.dtype, use_bias=False)
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.transform(hidden_states)

        if shared_embedding is not None:
            hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            hidden_states = self.decoder(hidden_states)

        bias = jnp.asarray(self.bias, self.dtype)
        hidden_states += bias
        return hidden_states
class_definition
70,763
71,669
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,522
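Small standalone sketch of the weight-tying trick in FlaxBigBirdLMPredictionHead.__call__: the transposed word-embedding table is injected as the Dense kernel. All sizes below are made up.

import jax.numpy as jnp
import flax.linen as nn

vocab_size, hidden_size = 16, 8
decoder = nn.Dense(vocab_size, use_bias=False)

# pretend word-embedding table of shape (vocab_size, hidden_size)
shared_embedding = jnp.arange(vocab_size * hidden_size, dtype=jnp.float32).reshape(vocab_size, hidden_size)
hidden_states = jnp.ones((2, 4, hidden_size))

# Dense kernels are (in_features, out_features), hence the transpose
logits = decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
print(logits.shape)  # (2, 4, 16)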
class FlaxBigBirdOnlyMLMHead(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.predictions = FlaxBigBirdLMPredictionHead(self.config, dtype=self.dtype)

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.predictions(hidden_states, shared_embedding=shared_embedding)
        return hidden_states
class_definition
71,769
72,161
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,523
class FlaxBigBirdPreTrainingHeads(nn.Module):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.predictions = FlaxBigBirdLMPredictionHead(self.config, dtype=self.dtype)
        self.seq_relationship = nn.Dense(2, dtype=self.dtype)

    def __call__(self, hidden_states, pooled_output, shared_embedding=None):
        prediction_scores = self.predictions(hidden_states, shared_embedding=shared_embedding)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score
class_definition
72,164
72,740
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,524
class FlaxBigBirdPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig base_model_prefix = "bert" module_class: nn.Module = None def __init__( self, config: BigBirdConfig, input_shape: Optional[tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) if config.attention_type == "block_sparse" and input_shape is None: input_shape = (1, 12 * config.block_size) elif input_shape is None: input_shape = (1, 1) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.zeros_like(input_ids) position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng, indices_rng = jax.random.split(rng, num=3) rngs = {"params": params_rng, "dropout": dropout_rng, "indices": indices_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False, ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: Optional[jax.random.PRNGKey] = None, indices_rng: Optional[jax.random.PRNGKey] = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if indices_rng is not None: rngs["indices"] = indices_rng if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxBigBirdAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs
class_definition
72,743
81,216
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,525
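Hedged usage sketch for the abstract base class above: weight loading comes from FlaxPreTrainedModel, and gradient checkpointing can be switched on afterwards via the method defined here. The hub checkpoint id is an assumption, not something stated in this file.

from transformers import FlaxBigBirdModel

# assumed hub id; any BigBird checkpoint with Flax weights would do
model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")
model.enable_gradient_checkpointing()  # re-instantiates the module with remat-wrapped layers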
class FlaxBigBirdModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxBigBirdEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxBigBirdEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.pooler = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] pooled = nn.tanh(self.pooler(hidden_states[:, 0, :])) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
81,219
83,590
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,526
class FlaxBigBirdModel(FlaxBigBirdPreTrainedModel):
    module_class = FlaxBigBirdModule
class_definition
83,844
83,932
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,527
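From-config forward-pass sketch (randomly initialised weights, toy sizes; the extra RNGs are only needed for stochastic, training-mode calls). This is an illustration, not a recipe taken from the file itself.

import jax
import jax.numpy as jnp
from transformers import BigBirdConfig, FlaxBigBirdModel

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
model = FlaxBigBirdModel(config, seed=0)

# the sequence length must stay a multiple of block_size for block-sparse attention
input_ids = jnp.ones((1, 12 * config.block_size), dtype="i4")
outputs = model(input_ids)  # deterministic by default (train=False)
print(outputs.last_hidden_state.shape)  # (1, 768, config.hidden_size)

# a training-mode call additionally draws from the dropout and indices RNG streams
outputs = model(
    input_ids, train=True, dropout_rng=jax.random.PRNGKey(1), indices_rng=jax.random.PRNGKey(2)
)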
class FlaxBigBirdForPreTrainingModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.cls = FlaxBigBirdPreTrainingHeads(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.tie_word_embeddings: shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None hidden_states = outputs[0] pooled_output = outputs[1] prediction_scores, seq_relationship_score = self.cls( hidden_states, pooled_output, shared_embedding=shared_embedding ) if not return_dict: return (prediction_scores, seq_relationship_score) + outputs[2:] return FlaxBigBirdForPreTrainingOutput( prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
84,160
86,049
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,528
class FlaxBigBirdForPreTraining(FlaxBigBirdPreTrainedModel):
    module_class = FlaxBigBirdForPreTrainingModule
class_definition
86,390
86,501
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,529
class FlaxBigBirdForMaskedLMModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.cls = FlaxBigBirdOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
87,495
89,216
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,530
class FlaxBigBirdForMaskedLM(FlaxBigBirdPreTrainedModel):
    module_class = FlaxBigBirdForMaskedLMModule
class_definition
89,425
89,530
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,531
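Illustrative from-config sketch for the masked-LM model above; no pretrained weights are loaded and the sizes are toy values.

import jax.numpy as jnp
from transformers import BigBirdConfig, FlaxBigBirdForMaskedLM

config = BigBirdConfig(attention_type="original_full")  # full attention works for any length up to max_position_embeddings
model = FlaxBigBirdForMaskedLM(config, seed=0)

input_ids = jnp.ones((1, 16), dtype="i4")
logits = model(input_ids).logits  # (1, 16, config.vocab_size)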
class FlaxBigBirdClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
        classifier_dropout = (
            self.config.classifier_dropout
            if self.config.classifier_dropout is not None
            else self.config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(self, features, deterministic=True):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x, deterministic=deterministic)
        x = self.dense(x)
        x = ACT2FN[self.config.hidden_act](x)
        x = self.dropout(x, deterministic=deterministic)
        x = self.out_proj(x)
        return x
class_definition
89,646
90,579
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,532
class FlaxBigBirdForSequenceClassificationModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing ) self.classifier = FlaxBigBirdClassificationHead(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[2:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
90,582
92,035
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,533
class FlaxBigBirdForSequenceClassification(FlaxBigBirdPreTrainedModel):
    module_class = FlaxBigBirdForSequenceClassificationModule
class_definition
92,375
92,508
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,534
class FlaxBigBirdForMultipleChoiceModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
92,776
94,899
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,535
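A small shape-only sketch of the multiple-choice reshape trick used in FlaxBigBirdForMultipleChoiceModule: choices are folded into the batch dimension before the encoder, and the per-example Dense(1) logits are folded back into a (batch, num_choices) matrix afterwards. The encoder and head are faked with zeros here; only the reshapes are real.

import jax.numpy as jnp

batch, num_choices, seq_len = 2, 4, 8
input_ids = jnp.ones((batch, num_choices, seq_len), dtype=jnp.int32)

# flatten choices into the batch dimension before the encoder ...
flat_ids = input_ids.reshape(-1, input_ids.shape[-1])     # (8, 8)
# ... pretend the encoder + Dense(1) head produced one score per flattened example ...
flat_logits = jnp.zeros((flat_ids.shape[0], 1))
# ... then fold the choice dimension back to score the choices against each other
reshaped_logits = flat_logits.reshape(-1, num_choices)    # (2, 4)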
class FlaxBigBirdForMultipleChoice(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForMultipleChoiceModule def __init__( self, config: BigBirdConfig, input_shape: Optional[tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs, ): if config.attention_type == "block_sparse" and input_shape is None: input_shape = (1, 1, 12 * config.block_size) elif input_shape is None: input_shape = (1, 1) super().__init__(config, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
class_definition
95,137
95,783
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,536
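A short sketch of the default input_shape logic in the __init__ above: with block-sparse attention the dummy input used for weight initialization must span a whole number of blocks, so the default is 12 blocks of block_size tokens. The block_size value is the BigBirdConfig default, used here as an assumption.

block_size = 64                  # BigBirdConfig default
attention_type = "block_sparse"

if attention_type == "block_sparse":
    input_shape = (1, 1, 12 * block_size)   # (batch, num_choices, seq_len) -> (1, 1, 768)
else:
    input_shape = (1, 1)
print(input_shape)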
class FlaxBigBirdForTokenClassificationModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
96,188
97,985
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,537
class FlaxBigBirdForTokenClassification(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForTokenClassificationModule
class_definition
98,329
98,456
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,538
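A hedged usage sketch for the token-classification wrapper above; as with the sequence-classification example, the head is randomly initialized for this MLM checkpoint and full attention is used to avoid block-size padding for a short input.

from transformers import AutoTokenizer, FlaxBigBirdForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
model = FlaxBigBirdForTokenClassification.from_pretrained(
    "google/bigbird-roberta-base", attention_type="original_full"
)
inputs = tokenizer("Label every token in this sentence.", return_tensors="jax")
logits = model(**inputs).logits   # (1, seq_len, num_labels) — one score vector per token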
class FlaxBigBirdForQuestionAnsweringHead(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.intermediate = FlaxBigBirdIntermediate(self.config, dtype=self.dtype) self.output = FlaxBigBirdOutput(self.config, dtype=self.dtype) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__(self, encoder_output, deterministic=True): hidden_states = self.dropout(encoder_output, deterministic=deterministic) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states
class_definition
98,609
99,399
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,539
class FlaxBigBirdForQuestionAnsweringModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = False gradient_checkpointing: bool = False def setup(self): self.config.num_labels = 2 self.bert = FlaxBigBirdModule( self.config, dtype=self.dtype, add_pooling_layer=self.add_pooling_layer, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_classifier = FlaxBigBirdForQuestionAnsweringHead(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, logits_mask=None, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] pooled_output = outputs[1] if self.add_pooling_layer else None logits = self.qa_classifier(hidden_states, deterministic=deterministic) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxBigBirdForQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, pooled_output=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
99,402
101,536
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,540
class FlaxBigBirdForQuestionAnswering(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForQuestionAnsweringModule @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, question_lengths=None, params: dict = None, dropout_rng: Optional[jax.random.PRNGKey] = None, indices_rng: Optional[jax.random.PRNGKey] = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = jnp.argmax((input_ids == self.config.sep_token_id).astype("i4"), axis=-1) + 1 question_lengths = jnp.expand_dims(question_lengths, axis=1) seqlen = input_ids.shape[1] logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = (~logits_mask).astype("i4") logits_mask = jnp.expand_dims(logits_mask, axis=2) logits_mask = logits_mask.at[:, 0].set(False) # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng if indices_rng is not None: rngs["indices"] = indices_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids, jnp.array(position_ids, dtype="i4"), jnp.array(head_mask, dtype="i4"), logits_mask, not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) @staticmethod def prepare_question_mask(q_lengths, maxlen: int): # q_lengths -> (bz, 1) mask = jnp.arange(0, maxlen) mask = jnp.expand_dims(mask, axis=0) < q_lengths return mask
class_definition
101,830
105,114
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,541
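A toy reproduction of the question-masking logic in the __call__ above, assuming the <cls> question <sep> context <sep> layout: the first separator position gives the question length, and every position before it is flagged so its start/end logits can be pushed towards -inf (logits - logits_mask * 1e6 in the real module). The token ids are made up except sep_token_id, which follows the BigBirdConfig default (an assumption here).

import jax.numpy as jnp

sep_token_id = 66                                            # BigBirdConfig default (assumption)
input_ids = jnp.array([[65, 11, 12, 66, 21, 22, 23, 66]])    # one toy sequence of length 8

question_lengths = jnp.argmax((input_ids == sep_token_id).astype("i4"), axis=-1) + 1   # [4]
question_lengths = jnp.expand_dims(question_lengths, axis=1)

maxlen = input_ids.shape[1]
logits_mask = jnp.expand_dims(jnp.arange(0, maxlen), axis=0) < question_lengths
print(logits_mask)   # [[ True  True  True  True False False False False]]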
class FlaxBigBirdForCausalLMModule(nn.Module): config: BigBirdConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.bert = FlaxBigBirdModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.cls = FlaxBigBirdOnlyMLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.bert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.bert.variables["params"]["embeddings"]["word_embeddings"]["embedding"] else: shared_embedding = None # Compute the prediction scores logits = self.cls(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, )
class_definition
105,282
107,508
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,542
class FlaxBigBirdForCausalLM(FlaxBigBirdPreTrainedModel): module_class = FlaxBigBirdForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. # Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs
class_definition
107,821
109,358
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_flax_big_bird.py
null
4,543
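A two-line illustration of how prepare_inputs_for_generation above derives position ids from a padded attention mask: the cumulative sum minus one gives 0-based positions for real tokens, while left-padded positions collapse to -1 (harmless, since the causal/attention mask already excludes them).

import jax.numpy as jnp

attention_mask = jnp.array([[0, 0, 1, 1, 1]])
position_ids = attention_mask.cumsum(axis=-1) - 1
print(position_ids)   # [[-1 -1  0  1  2]]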
class BigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * (self.hidden_size**0.5) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings
class_definition
9,017
12,260
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,544
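A one-liner showing the optional embedding rescaling applied in the forward above when config.rescale_embeddings is True: word embeddings are multiplied by sqrt(hidden_size) before the token-type and position embeddings are added. The hidden size is the usual base-model value, used as an assumption.

import torch

hidden_size = 768
inputs_embeds = torch.randn(2, 16, hidden_size)
rescaled = inputs_embeds * (hidden_size ** 0.5)   # scale factor ≈ 27.7 for hidden_size=768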
class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs
class_definition
12,263
17,336
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,545
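A standalone, shape-only sketch of the full-attention path above: transpose_for_scores splits the hidden dimension into heads, and attention scores are the scaled dot product of queries and keys. No learned weights are involved; the toy sizes are assumptions.

import math
import torch

batch, seq_len, num_heads, head_size = 2, 8, 4, 16
hidden = torch.randn(batch, seq_len, num_heads * head_size)

def transpose_for_scores(x):
    # (batch, seq, heads*head_size) -> (batch, heads, seq, head_size)
    return x.view(batch, seq_len, num_heads, head_size).permute(0, 2, 1, 3)

q = transpose_for_scores(hidden)
k = transpose_for_scores(hidden)
scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size)
probs = torch.nn.functional.softmax(scores, dim=-1)   # (2, 4, 8, 8)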
class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_length % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBird block-sparse attention as 
suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view(bsz, n_heads, -1, to_block_size) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view(bsz, n_heads, -1, to_block_size) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :] = ( second_last_attn_weights[:, :, :, to_block_size : 4 * to_block_size] ) # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch.div(shift, num_indices_to_gather, rounding_mode="floor") * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks def _bigbird_block_rand_mask( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) # During inference (eval) no randomness if not self.training: return rand_attn middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. 
Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # During inference (eval) no randomness if not self.training: for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], 
num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32)
class_definition
17,339
60,445
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,546
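A back-of-the-envelope token budget for the ITC block-sparse scheme spelled out in the comments above: each query block attends to 2 global blocks (first and last), a 3-block sliding window, and num_random_blocks random blocks, independent of total sequence length. The values below are the BigBirdConfig defaults, used as assumptions.

block_size = 64
num_random_blocks = 3

global_tokens = 2 * block_size           # first + last block
window_tokens = 3 * block_size           # the block itself + one left + one right
random_tokens = num_random_blocks * block_size

attended = global_tokens + window_tokens + random_tokens
print(attended)   # 512 keys per query block, regardless of sequence length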
class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
60,535
61,144
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,547
class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # fp16 compatibility if band_mask is not None: band_mask = band_mask.to(hidden_states.dtype) if from_mask is not None: from_mask = from_mask.to(hidden_states.dtype) if to_mask is not None: to_mask = to_mask.to(hidden_states.dtype) if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: if encoder_hidden_states is not None: raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'") self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs
class_definition
61,147
64,382
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,548
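A hedged usage sketch of the set_attention_type switch that this wrapper implements: because the query/key/value projections are simply re-attached to the new attention class, a loaded checkpoint can be flipped between sparse and full attention without retraining. The model-level method shown here is exposed by BigBirdModel in the same file.

from transformers import BigBirdModel

model = BigBirdModel.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse")
model.set_attention_type("original_full")   # e.g. for short inputs where full attention is cheaper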
class BigBirdIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
class_definition
64,474
65,042
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,549
class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states
class_definition
65,128
65,739
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,550
class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BigBirdAttention(config) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " " cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, 
attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output
class_definition
65,742
70,496
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,551
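The layer above runs its feed-forward block through `apply_chunking_to_forward`. As a rough, self-contained sketch of what chunking over the sequence dimension does (a position-wise layer applied to slices yields the same result with lower peak memory); the chunk size and shapes below are illustrative only:

```python
import torch


def chunked_feed_forward(feed_forward, hidden_states, chunk_size, seq_len_dim=1):
    # chunk_size == 0 means chunking is disabled, mirroring config.chunk_size_feed_forward
    if chunk_size == 0:
        return feed_forward(hidden_states)
    chunks = hidden_states.split(chunk_size, dim=seq_len_dim)
    return torch.cat([feed_forward(chunk) for chunk in chunks], dim=seq_len_dim)


position_wise = torch.nn.Linear(16, 16)        # stand-in for the intermediate + output blocks
x = torch.randn(2, 128, 16)                    # (batch, seq_length, hidden)
assert torch.allclose(position_wise(x), chunked_feed_forward(position_wise, x, chunk_size=32), atol=1e-6)
```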
class BigBirdEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_type = config.attention_type self.layer = nn.ModuleList( [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value for layer in self.layer: layer.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, return_dict=True, ) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, )
class_definition
70,499
74,980
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,552
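The encoder loop above disables key/value caching whenever gradient checkpointing is active. A minimal sketch of the checkpointing idea with a stand-in layer (assuming a recent PyTorch where `use_reentrant` is accepted):

```python
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(8, 8)                              # stand-in for one encoder layer
hidden_states = torch.randn(2, 4, 8, requires_grad=True)

gradient_checkpointing = True
if gradient_checkpointing:
    # activations are recomputed in the backward pass instead of being stored,
    # which is why cached key/values (`use_cache=True`) are forced off above
    out = checkpoint(layer, hidden_states, use_reentrant=False)
else:
    out = layer(hidden_states)
out.sum().backward()
```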
class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states
class_definition
75,083
75,786
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,553
class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states
class_definition
75,882
76,720
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,554
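The LM head above keeps its own output bias while its decoder weight is tied to the input embeddings elsewhere in the model; the `decoder.bias = self.bias` link is what lets the bias be resized together with the decoder. A small free-standing sketch of that tying convention, with made-up sizes:

```python
import torch
from torch import nn

vocab_size, hidden_size = 100, 16
word_embeddings = nn.Embedding(vocab_size, hidden_size)
decoder = nn.Linear(hidden_size, vocab_size, bias=False)

decoder.weight = word_embeddings.weight                # tie the output projection to the input embeddings
decoder.bias = nn.Parameter(torch.zeros(vocab_size))   # output-only bias, kept as a separate parameter

logits = decoder(torch.randn(2, 5, hidden_size))
assert logits.shape == (2, 5, vocab_size)
assert decoder.weight.data_ptr() == word_embeddings.weight.data_ptr()
```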
class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores
class_definition
76,811
77,131
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,555
class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score
class_definition
77,222
77,529
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,556
class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score
class_definition
77,625
78,094
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,557
class BigBirdPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = "bert" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
class_definition
78,097
79,257
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,558
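For reference, the initialization policy above applied to a toy module tree (0.02 is the usual `initializer_range` default and is assumed here):

```python
import torch
from torch import nn

initializer_range = 0.02


def init_weights(module):
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)


toy = nn.Sequential(nn.Embedding(10, 8, padding_idx=0), nn.LayerNorm(8), nn.Linear(8, 8))
toy.apply(init_weights)  # Module.apply visits every submodule, as post_init does for the real model
assert toy[0].weight[0].abs().sum() == 0  # the padding row is zeroed
```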
class BigBirdForPreTrainingOutput(ModelOutput): """ Output type of [`BigBirdForPreTraining`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
82,544
84,500
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,559
class BigBirdForQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`): Pooler output from BigBirdModel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None
class_definition
84,514
86,408
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,560
class BigBirdModel(BigBirdPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting" " `attention_type=original_full`" ) self.set_attention_type("original_full") # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input.
This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. 
" "Changing attention type to 'original_full'..." ) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @staticmethod def 
create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.warning_once( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=False ) # no attention on the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
class_definition
86,571
103,282
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,561
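Two pieces of arithmetic in the record above are easy to misread: the threshold below which the model silently falls back to full attention, and the right-padding that makes the sequence length a multiple of the block size. A hedged sketch with illustrative numbers (block_size=64 and num_random_blocks=3 are typical defaults, assumed here), including the shape of the band mask built by `create_masks_for_block_sparse_attn`:

```python
import torch

block_size, num_random_blocks = 64, 3

# 1) block-sparse attention needs the sequence to be longer than
#    2 global + 3 sliding + 2 * num_random_blocks blocks' worth of tokens
max_tokens_to_attend = (5 + 2 * num_random_blocks) * block_size
print(max_tokens_to_attend)                     # 704 -> shorter inputs fall back to 'original_full'

# 2) inputs are right-padded up to the next multiple of block_size
seq_len = 919                                   # e.g. the long-article length in the MLM example below
padding_len = (block_size - seq_len % block_size) % block_size
print(padding_len, seq_len + padding_len)       # 41 960

# 3) shape-only view of the band mask derived from the (padded) attention mask
attention_mask = torch.ones(1, seq_len + padding_len)
blocked_mask = attention_mask.view(1, -1, block_size)                     # (batch, num_blocks, block_size)
exp_blocked = torch.cat([blocked_mask[:, 1:-3], blocked_mask[:, 2:-2], blocked_mask[:, 3:-1]], dim=2)
band_mask = torch.einsum("blq,blk->blqk", blocked_mask[:, 2:-2], exp_blocked).unsqueeze(1)
print(band_mask.shape)  # (1, 1, 11, 64, 192) == (batch, 1, num_blocks - 4, block_size, 3 * block_size)
```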
class BigBirdForPreTraining(BigBirdPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, next_sentence_label: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if next_sentence_label is not None and total_loss is not None: next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = total_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
103,285
108,027
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,562
class BigBirdForMaskedLM(BigBirdPreTrainedModel): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForMaskedLM >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random long article >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"] >>> # select random sentence >>> LONG_ARTICLE_TARGET[332:398] 'the highest values are very close to the theoretical maximum value' >>> # add mask_token >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]") >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt") >>> # long article input >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'maximum' ``` ```python >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 1.99 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask}
class_definition
108,139
114,146
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,563
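One easy-to-miss detail in the record above: `prepare_inputs_for_generation` appends a dummy PAD token with a zeroed attention-mask entry before each generation step. A tiny sketch of just that manipulation, with made-up token ids:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[101, 7, 8, 9]])
attention_mask = torch.ones_like(input_ids)

# extend the mask with a zero, then append one PAD token to the inputs
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full((input_ids.shape[0], 1), pad_token_id, dtype=torch.long)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
print(input_ids)        # tensor([[101,   7,   8,   9,   0]])
print(attention_mask)   # tensor([[1, 1, 1, 1, 0]])
```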
class BigBirdForCausalLM(BigBirdPreTrainedModel, GenerationMixin): _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True`.") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past
class_definition
114,284
120,298
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,564
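The causal-LM loss above scores position t's logits against the token at position t+1, so the last logit step and the first label token are dropped before the cross-entropy. A minimal sketch with illustrative shapes:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 50
logits = torch.randn(2, 6, vocab_size)            # (batch, seq_length, vocab)
labels = torch.randint(0, vocab_size, (2, 6))     # same ids that were fed as input_ids

shifted_logits = logits[:, :-1, :].contiguous()   # predictions for positions 0 .. seq_length - 2
shifted_labels = labels[:, 1:].contiguous()       # targets are the *next* tokens
loss = CrossEntropyLoss()(shifted_logits.view(-1, vocab_size), shifted_labels.view(-1))
print(loss.shape)  # torch.Size([]) - a scalar averaged over batch * (seq_length - 1) positions
```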
class BigBirdClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x
class_definition
120,301
121,124
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,565
class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForSequenceClassification >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train") # doctest: +IGNORE_RESULT >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt") >>> # long input article >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> num_labels = len(model.config.id2label) >>> model = BigBirdForSequenceClassification.from_pretrained( ... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) 1.13 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
121,353
126,378
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,566
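The classification record above infers `problem_type` from the label count and dtype before picking a loss. A free-standing sketch of that dispatch (the helper below is illustrative, not a library function):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss


def infer_problem_type(num_labels, labels):
    if num_labels == 1:
        return "regression"                        # MSE on a single regression target
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"       # cross-entropy on class indices
    return "multi_label_classification"            # BCE-with-logits on multi-hot targets


logits = torch.randn(4, 3)
class_labels = torch.tensor([0, 2, 1, 2])
multi_hot_labels = torch.tensor([[1.0, 0.0, 1.0]] * 4)

assert infer_problem_type(3, class_labels) == "single_label_classification"
assert infer_problem_type(3, multi_hot_labels) == "multi_label_classification"
ce_loss = CrossEntropyLoss()(logits.view(-1, 3), class_labels.view(-1))
bce_loss = BCEWithLogitsLoss()(logits, multi_hot_labels)
mse_loss = MSELoss()(torch.randn(4, 1).squeeze(), torch.randn(4))   # the num_labels == 1 branch
```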
class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
126,616
130,157
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,567
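For the multiple-choice record above, the choice dimension is folded into the batch before the encoder and unfolded again for the classifier, so the cross-entropy runs over each example's choices. A shape-only sketch (the pooled output is a random stand-in for the encoder's):

```python
import torch

batch_size, num_choices, seq_length, hidden_size = 2, 4, 8, 16
input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_length))

flat_input_ids = input_ids.view(-1, input_ids.size(-1))              # (batch * choices, seq) fed to the encoder
pooled_output = torch.randn(batch_size * num_choices, hidden_size)   # stand-in for the encoder's pooled output

classifier = torch.nn.Linear(hidden_size, 1)
logits = classifier(pooled_output)                                   # (batch * choices, 1)
reshaped_logits = logits.view(-1, num_choices)                       # (batch, choices): one score per choice

labels = torch.tensor([1, 3])                                        # index of the correct choice per example
loss = torch.nn.CrossEntropyLoss()(reshaped_logits, labels)
```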
class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class_definition
130,393
133,249
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,568
class BigBirdForQuestionAnsweringHead(nn.Module): """Head for question answering tasks.""" def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states
class_definition
133,252
133,944
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,569
class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    def __init__(self, config, add_pooling_layer=False):
        super().__init__(config)

        config.num_labels = 2
        self.num_labels = config.num_labels
        self.sep_token_id = config.sep_token_id

        self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer)
        self.qa_classifier = BigBirdForQuestionAnsweringHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        question_lengths: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, BigBirdForQuestionAnswering
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
        >>> model = BigBirdForQuestionAnswering.from_pretrained("google/bigbird-roberta-base")
        >>> squad_ds = load_dataset("rajpurkar/squad_v2", split="train")  # doctest: +IGNORE_RESULT

        >>> # select random article and question
        >>> LONG_ARTICLE = squad_ds[81514]["context"]
        >>> QUESTION = squad_ds[81514]["question"]
        >>> QUESTION
        'During daytime how high can the temperatures reach?'

        >>> inputs = tokenizer(QUESTION, LONG_ARTICLE, return_tensors="pt")
        >>> # long article and question input
        >>> list(inputs["input_ids"].shape)
        [1, 929]

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> answer_start_index = outputs.start_logits.argmax()
        >>> answer_end_index = outputs.end_logits.argmax()
        >>> predict_answer_token_ids = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
        >>> predict_answer_token = tokenizer.decode(predict_answer_token_ids)
        ```

        ```python
        >>> target_start_index, target_end_index = torch.tensor([130]), torch.tensor([132])
        >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
        >>> loss = outputs.loss
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)

        if question_lengths is None and input_ids is not None:
            # assuming input_ids format: <cls> <question> <sep> context <sep>
            question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
            question_lengths.unsqueeze_(1)

        logits_mask = None
        if question_lengths is not None:
            # setting lengths logits to `-inf`
            logits_mask = self.prepare_question_mask(question_lengths, seqlen)
            if token_type_ids is None:
                token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask
            logits_mask = logits_mask
            logits_mask[:, 0] = False
            logits_mask.unsqueeze_(2)

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.qa_classifier(sequence_output)

        if logits_mask is not None:
            # removing question tokens from the competition
            logits = logits - logits_mask * 1e6

        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return BigBirdForQuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        # q_lengths -> (bz, 1)
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen)
        mask = torch.where(mask < q_lengths, 1, 0)
        return mask
class_definition
134,238
141,512
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/big_bird/modeling_big_bird.py
null
4,570
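The forward pass above keeps question tokens out of the span competition by subtracting a large constant wherever `prepare_question_mask` is set. A small sketch of that masking step, mirroring the method with made-up lengths rather than real model inputs:

# Sketch of the question-token masking used in forward() (toy lengths, standalone).
import torch

def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
    # q_lengths -> (bz, 1); 1 marks question positions, 0 marks context positions
    mask = torch.arange(0, maxlen).to(q_lengths.device)
    mask.unsqueeze_(0)  # -> (1, maxlen)
    return torch.where(mask < q_lengths, 1, 0)

question_lengths = torch.tensor([[4], [2]])  # assumed <cls> + question + <sep> lengths per example
logits_mask = prepare_question_mask(question_lengths, maxlen=8)
logits_mask[:, 0] = False  # the [CLS] position stays eligible (used for "no answer")

logits = torch.randn(2, 8, 2)  # (batch, seq_len, start/end)
masked = logits - logits_mask.unsqueeze(2) * 1e6  # question tokens can no longer win the argmax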
class _BaseAutoModelClass:
    # Base class for auto models.
    _model_mapping = None

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config, **kwargs):
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, config._name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            if "--" in class_ref:
                repo_id, class_ref = class_ref.split("--")
            else:
                repo_id = config.name_or_path
            model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
            cls.register(config.__class__, model_class, exist_ok=True)
            _ = kwargs.pop("code_revision", None)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)

        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        hub_kwargs_names = [
            "cache_dir",
            "force_download",
            "local_files_only",
            "proxies",
            "resume_download",
            "revision",
            "subfolder",
            "use_auth_token",
            "token",
        ]
        hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
        code_revision = kwargs.pop("code_revision", None)
        commit_hash = kwargs.pop("_commit_hash", None)
        adapter_kwargs = kwargs.pop("adapter_kwargs", None)

        token = hub_kwargs.pop("token", None)
        use_auth_token = hub_kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            hub_kwargs["token"] = token

        if commit_hash is None:
            if not isinstance(config, PretrainedConfig):
                # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
                resolved_config_file = cached_file(
                    pretrained_model_name_or_path,
                    CONFIG_NAME,
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                    **hub_kwargs,
                )
                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
            else:
                commit_hash = getattr(config, "_commit_hash", None)

        if is_peft_available():
            if adapter_kwargs is None:
                adapter_kwargs = {}
                if token is not None:
                    adapter_kwargs["token"] = token

            maybe_adapter_path = find_adapter_config_file(
                pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
            )

            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)

                    adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
                    pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]

        if not isinstance(config, PretrainedConfig):
            kwargs_orig = copy.deepcopy(kwargs)
            # ensure not to pollute the config object with torch_dtype="auto" - since it's
            # meaningless in the context of the config object - torch.dtype values are acceptable
            if kwargs.get("torch_dtype", None) == "auto":
                _ = kwargs.pop("torch_dtype")
            # to not overwrite the quantization_config if config has a quantization_config
            if kwargs.get("quantization_config", None) is not None:
                _ = kwargs.pop("quantization_config")

            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path,
                return_unused_kwargs=True,
                trust_remote_code=trust_remote_code,
                code_revision=code_revision,
                _commit_hash=commit_hash,
                **hub_kwargs,
                **kwargs,
            )

            # if torch_dtype=auto was passed here, ensure to pass it on
            if kwargs_orig.get("torch_dtype", None) == "auto":
                kwargs["torch_dtype"] = "auto"
            if kwargs_orig.get("quantization_config", None) is not None:
                kwargs["quantization_config"] = kwargs_orig["quantization_config"]

        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        # Set the adapter kwargs
        kwargs["adapter_kwargs"] = adapter_kwargs

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            model_class = get_class_from_dynamic_module(
                class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
            )
            _ = hub_kwargs.pop("code_revision", None)
            cls.register(config.__class__, model_class, exist_ok=True)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def register(cls, config_class, model_class, exist_ok=False):
        """
        Register a new model for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            model_class ([`PreTrainedModel`]):
                The model to register.
        """
        if hasattr(model_class, "config_class") and str(model_class.config_class) != str(config_class):
            raise ValueError(
                "The model class you are passing has a `config_class` attribute that is not consistent with the "
                f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix "
                "one of those so they match!"
            )
        cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)
class_definition
25,155
33,769
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/auto_factory.py
null
4,571
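A minimal sketch of the `register` path above: a custom config/model pair is added to an auto class's mapping so `from_config` can resolve it. The class names and the "my-tiny" model type are hypothetical placeholders introduced only for illustration, not part of the library.

# Hypothetical example of registering a custom model with an auto class.
import torch.nn as nn
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

class MyTinyConfig(PretrainedConfig):  # hypothetical config class
    model_type = "my-tiny"

    def __init__(self, hidden_size=8, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size

class MyTinyModel(PreTrainedModel):  # hypothetical model class
    config_class = MyTinyConfig

    def __init__(self, config):
        super().__init__(config)
        self.linear = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, x):
        return self.linear(x)

AutoConfig.register("my-tiny", MyTinyConfig)
AutoModel.register(MyTinyConfig, MyTinyModel)

model = AutoModel.from_config(MyTinyConfig())  # resolved through cls._model_mapping
print(type(model).__name__)  # 'MyTinyModel'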
class _BaseAutoBackboneClass(_BaseAutoModelClass):
    # Base class for auto backbone models.
    _model_mapping = None

    @classmethod
    def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        if kwargs.get("out_features", None) is not None:
            raise ValueError("Cannot specify `out_features` for timm backbones")

        if kwargs.get("output_loading_info", False):
            raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super().from_config(config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        use_timm_backbone = kwargs.pop("use_timm_backbone", False)
        if use_timm_backbone:
            return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class_definition
33,772
35,532
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/auto_factory.py
null
4,572
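A usage sketch for the timm branch above, assuming the `timm` and `vision` extras are installed; the checkpoint name and `out_indices` values are assumptions chosen for illustration.

# Sketch: routing AutoBackbone through the timm loading path (assumes `timm` is installed).
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained(
    "resnet18",                # assumed timm checkpoint name
    use_timm_backbone=True,    # triggers _load_timm_backbone_from_pretrained
    out_indices=(2, 4),        # which feature stages to return (assumed)
)

pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
print([f.shape for f in outputs.feature_maps])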
class _LazyAutoMapping(OrderedDict):
    """
    " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.

    Args:
        - config_mapping: The map model type to config class
        - model_mapping: The map model type to model (or tokenizer) class
    """

    def __init__(self, config_mapping, model_mapping):
        self._config_mapping = config_mapping
        self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
        self._model_mapping = model_mapping
        self._model_mapping._model_mapping = self
        self._extra_content = {}
        self._modules = {}

    def __len__(self):
        common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
        return len(common_keys) + len(self._extra_content)

    def __getitem__(self, key):
        if key in self._extra_content:
            return self._extra_content[key]
        model_type = self._reverse_config_mapping[key.__name__]
        if model_type in self._model_mapping:
            model_name = self._model_mapping[model_type]
            return self._load_attr_from_module(model_type, model_name)

        # Maybe there was several model types associated with this config.
        model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
        for mtype in model_types:
            if mtype in self._model_mapping:
                model_name = self._model_mapping[mtype]
                return self._load_attr_from_module(mtype, model_name)
        raise KeyError(key)

    def _load_attr_from_module(self, model_type, attr):
        module_name = model_type_to_module_name(model_type)
        if module_name not in self._modules:
            self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
        return getattribute_from_module(self._modules[module_name], attr)

    def keys(self):
        mapping_keys = [
            self._load_attr_from_module(key, name)
            for key, name in self._config_mapping.items()
            if key in self._model_mapping.keys()
        ]
        return mapping_keys + list(self._extra_content.keys())

    def get(self, key, default):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __bool__(self):
        return bool(self.keys())

    def values(self):
        mapping_values = [
            self._load_attr_from_module(key, name)
            for key, name in self._model_mapping.items()
            if key in self._config_mapping.keys()
        ]
        return mapping_values + list(self._extra_content.values())

    def items(self):
        mapping_items = [
            (
                self._load_attr_from_module(key, self._config_mapping[key]),
                self._load_attr_from_module(key, self._model_mapping[key]),
            )
            for key in self._model_mapping.keys()
            if key in self._config_mapping.keys()
        ]
        return mapping_items + list(self._extra_content.items())

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, item):
        if item in self._extra_content:
            return True
        if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
            return False
        model_type = self._reverse_config_mapping[item.__name__]
        return model_type in self._model_mapping

    def register(self, key, value, exist_ok=False):
        """
        Register a new model in this mapping.
        """
        if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
            model_type = self._reverse_config_mapping[key.__name__]
            if model_type in self._model_mapping.keys() and not exist_ok:
                raise ValueError(f"'{key}' is already used by a Transformers model.")

        self._extra_content[key] = value
class_definition
40,580
44,541
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/auto_factory.py
null
4,573
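A short sketch of how such a lazy mapping is consumed: keys are config classes and a lookup imports the corresponding modeling module only on first access. `MODEL_MAPPING` is used here as a representative `_LazyAutoMapping` instance.

# Sketch: resolving a config class to a model class through a _LazyAutoMapping instance.
from transformers import BertConfig
from transformers.models.auto.modeling_auto import MODEL_MAPPING  # a _LazyAutoMapping

print(BertConfig in MODEL_MAPPING)     # True, without importing modeling_bert yet
model_cls = MODEL_MAPPING[BertConfig]  # triggers the lazy import and returns the model class
print(model_cls.__name__)              # 'BertModel'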
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING
class_definition
11,405
11,486
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,574
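Each of the concrete auto classes in the records that follow simply binds `_BaseAutoModelClass` to one task-specific mapping, so loading is a single call. A minimal sketch, assuming Flax/JAX is installed; the checkpoint name is an assumption for illustration.

# Sketch: FlaxAutoModel resolves the checkpoint's config to the matching Flax model class.
from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # assumed checkpoint
print(type(model).__name__)  # 'FlaxBertModel'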
class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
class_definition
11,540
11,651
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,575
class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
class_definition
11,757
11,863
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,576
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
class_definition
11,976
12,082
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,577
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
class_definition
12,195
12,313
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,578
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
class_definition
12,501
12,635
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,579
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
class_definition
12,781
12,905
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,580
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
class_definition
13,030
13,158
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,581
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
class_definition
13,295
13,413
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,582
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
class_definition
13,529
13,664
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,583
class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
class_definition
13,811
13,939
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,584
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
class_definition
14,076
14,187
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,585
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
class_definition
14,303
14,421
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_flax_auto.py
null
4,586
class TFAutoModelForMaskGeneration(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING
class_definition
21,982
22,096
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,587
class TFAutoModelForTextEncoding(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING
class_definition
22,099
22,209
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,588
class TFAutoModel(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_MAPPING
class_definition
22,212
22,289
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,589
class TFAutoModelForAudioClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
class_definition
22,339
22,463
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,590
class TFAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
class_definition
22,596
22,703
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,591
class _TFAutoModelWithLMHead(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
class_definition
22,879
22,980
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,592
class TFAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
class_definition
23,082
23,184
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,593
class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
class_definition
23,293
23,418
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,594
class TFAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
class_definition
23,552
23,676
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,595
class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
class_definition
23,809
23,951
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,596
class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
class_definition
24,110
24,236
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,597
class TFAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
class_definition
24,372
24,479
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,598
class TFAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
class_definition
24,591
24,693
0
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/auto/modeling_tf_auto.py
null
4,599