# TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well.
def __init__(
self,
config: PretrainedConfig,
batch_size: int = None,
max_cache_len: int = None,
device: Union[torch.device, str] = "cpu",
dtype: torch.dtype = torch.float32,
max_batch_size: Optional[int] = None,
layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None,
) -> None:
super().__init__()
if batch_size is not None:
logger.warning_once(
f"The 'batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in "
"v4.49. Use the more precisely named 'max_batch_size' argument instead."
)
if not hasattr(config, "sliding_window") or config.sliding_window is None:
raise ValueError(
"Setting `cache_implementation` to 'sliding_window' requires the model config supporting "
"sliding window attention, please check if there is a `sliding_window` field in the model "
"config and it's not set to None."
)
self.max_cache_len = max_cache_len
self.max_batch_size = batch_size or max_batch_size
# Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
self.head_dim = (
config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
)
self.dtype = dtype
self.num_key_value_heads = (
config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
)
layer_switch = config.sliding_window_pattern if hasattr(config, "sliding_window_pattern") else 2 # 2 is for BC
self.is_sliding = torch.tensor(
[bool((i + 1) % layer_switch) for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device
)
self.key_cache: List[torch.Tensor] = []
self.value_cache: List[torch.Tensor] = []
global_cache_shape = (self.max_batch_size, self.num_key_value_heads, max_cache_len, self.head_dim)
sliding_cache_shape = (
self.max_batch_size,
self.num_key_value_heads,
min(config.sliding_window, max_cache_len),
self.head_dim,
)
for i in range(config.num_hidden_layers):
if layer_device_map is not None:
layer_device = layer_device_map[i]
else:
layer_device = device
# Note: `mark_static_address` is used to tag the cache as a fixed data pointer, preventing CUDA graph
# breaks when updating the cache.
cache_shape = global_cache_shape if not self.is_sliding[i] else sliding_cache_shape
new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device)
new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=layer_device)
torch._dynamo.mark_static_address(new_layer_key_cache)
torch._dynamo.mark_static_address(new_layer_value_cache)
self.key_cache.append(new_layer_key_cache)
self.value_cache.append(new_layer_value_cache)
def _sliding_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
if cache_position.shape[0] > max_cache_len:
k_out = key_states[:, :, -max_cache_len:, :]
v_out = value_states[:, :, -max_cache_len:, :]
# Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly
self.key_cache[layer_idx] += k_out
self.value_cache[layer_idx] += v_out
# Return the whole states instead of `k_out`, `v_out` so the entire prompt is taken into account when
# building the KV cache, instead of just throwing away the tokens that fall outside the window.
return key_states, value_states
slicing = torch.ones(max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0)
cache_position = cache_position.clamp(0, max_cache_len - 1)
to_shift = cache_position >= max_cache_len - 1
indices = (slicing + to_shift[-1].int() - 1) % max_cache_len
k_out = k_out[:, :, indices]
v_out = v_out[:, :, indices]
k_out[:, :, cache_position] = key_states
v_out[:, :, cache_position] = value_states
# `.zero_()` followed by `+=` is equivalent to `=`, but compile-friendly (no graph breaks due to assignment)
self.key_cache[layer_idx].zero_()
self.value_cache[layer_idx].zero_()
self.key_cache[layer_idx] += k_out
self.value_cache[layer_idx] += v_out
return k_out, v_out
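# Illustrative sketch (added for clarity; toy values, not part of the original source): the rolling
# index arithmetic used above. While the window is not yet full, the indices are the identity
# permutation; once it is full, they rotate by one so the oldest slot gets overwritten first.
def _demo_sliding_indices(max_cache_len: int = 4, window_full: bool = True):
    import torch

    slicing = torch.ones(max_cache_len, dtype=torch.long).cumsum(0)  # tensor([1, 2, 3, 4])
    indices = (slicing + int(window_full) - 1) % max_cache_len       # tensor([1, 2, 3, 0]) if full,
    return indices                                                   # tensor([0, 1, 2, 3]) otherwise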
def _static_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
k_out[:, :, cache_position] = key_states
v_out[:, :, cache_position] = value_states
self.key_cache[layer_idx] = k_out
self.value_cache[layer_idx] = v_out
return k_out, v_out
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
cache_position = cache_kwargs.get("cache_position")
sliding_window = cache_kwargs.get("sliding_window")
k_out = self.key_cache[layer_idx]
v_out = self.value_cache[layer_idx]
if sliding_window:
update_fn = self._sliding_update
else:
update_fn = self._static_update
return update_fn(
cache_position,
layer_idx,
key_states,
value_states,
k_out,
v_out,
k_out.shape[2],
)
def get_max_cache_shape(self) -> Optional[int]:
return self.max_cache_len
def get_seq_length(self, layer_idx: Optional[int] = 0):
# Occupied cache == any slot in the 3rd dim (sequence length) holds a non-zero value. To save on compute, let's
# limit the check to the first batch member and head dimension.
# TODO: deprecate this function in favor of `cache_position`
if layer_idx != 0:
raise ValueError(
"`get_seq_length` on `HybridCache` may get inconsistent results depending on the layer index. "
"Using the `layer_idx` argument is not supported."
)
return (self.key_cache[layer_idx][0, 0].any(dim=-1)).sum()
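# Illustrative sketch (toy tensors, not part of the original source): the occupancy count above
# treats a cache slot as used when any entry along the head dimension is non-zero.
def _demo_get_seq_length():
    import torch

    key_slice = torch.zeros(6, 4)       # (max_cache_len, head_dim) for batch member 0, head 0
    key_slice[:3] = 1.0                 # pretend three positions have been written
    return key_slice.any(dim=-1).sum()  # tensor(3)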
def reset(self):
"""Resets the cache values while preserving the objects"""
for layer_idx in range(len(self.key_cache)):
# In-place ops prevent breaking the static address
self.key_cache[layer_idx].zero_()
self.value_cache[layer_idx].zero_()
@property
def batch_size(self):
logger.warning_once(
f"The 'batch_size' attribute of {self.__class__.__name__} is deprecated and will be removed in "
"v4.49. Use the more precisely named 'self.max_batch_size' attribute instead."
)
return self.max_batch_size
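# Minimal usage sketch (assumptions: `HybridCache` is exported by `transformers`, and
# "google/gemma-2-2b" is used purely as an example of a checkpoint that mixes global and
# sliding-window attention layers; substitute any compatible model).
def _demo_hybrid_cache_usage():
    from transformers import AutoModelForCausalLM, AutoTokenizer, HybridCache

    model = AutoModelForCausalLM.from_pretrained("google/gemma-2-2b")
    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b")
    inputs = tokenizer("My name is", return_tensors="pt")
    # Reserve room for 10 generated tokens on top of the prompt.
    past_key_values = HybridCache(
        config=model.config,
        max_batch_size=1,
        max_cache_len=inputs.input_ids.shape[1] + 10,
        device=model.device,
        dtype=model.dtype,
    )
    outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
    return outputs.past_key_values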
class MambaCache:
"""
Cache for the Mamba model, which has no attention mechanism and therefore no key/value states.
Arguments:
config (`PretrainedConfig`):
The configuration file defining the shape-related attributes required to initialize the static cache.
batch_size (`int`):
The batch size with which the model will be used. Note that a new instance must be instantiated if a
smaller batch size is used.
dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
The default `dtype` to use when initializing the layer.
device (`torch.device` or `str`, *optional*):
The device on which the cache should be initialized. Should be the same as the layer.
Attributes:
dtype: (`torch.dtype`):
The default `dtype` used to initialize the cache.
intermediate_size: (`int`):
Model's intermediate_size taken from config.
ssm_state_size: (`int`):
Model's state_size taken from config.
conv_kernel_size: (`int`):
Model's convolution kernel size taken from config.
conv_states: (`torch.Tensor`):
A tensor of shape `[layer_idx, batch_size, intermediate_size, conv_kernel_size]` that holds convolutional states.
ssm_states: (`torch.Tensor`):
A tensor of shape `[layer_idx, batch_size, intermediate_size, ssm_state_size]` that holds the SSM states.
Example:
```python
>>> from transformers import AutoTokenizer, MambaForCausalLM, MambaCache
>>> model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf")
>>> inputs = tokenizer(text="My name is Mamba", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> past_key_values = MambaCache(config=model.config, batch_size=1, device=model.device, dtype=model.dtype)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values
MambaCache()
```
"""
# TODO (joao): remove `=None` in non-optional arguments in v4.46. Remove from `OBJECTS_TO_IGNORE` as well.
def __init__(
self,
config: PretrainedConfig,
batch_size: int = None,
dtype: torch.dtype = torch.float16,
device: Optional[Union[torch.device, str]] = None,
max_batch_size: Optional[int] = None,
):
if batch_size is not None:
logger.warning_once(
f"The 'batch_size' argument of {self.__class__.__name__} is deprecated and will be removed in "
"v4.49. Use the more precisely named 'max_batch_size' argument instead."
)
self.dtype = dtype
self.max_batch_size = batch_size or max_batch_size
self.intermediate_size = config.intermediate_size
self.ssm_state_size = config.state_size
self.conv_kernel_size = config.conv_kernel
self.conv_states: torch.Tensor = torch.zeros(
config.num_hidden_layers,
self.max_batch_size,
self.intermediate_size,
self.conv_kernel_size,
device=device,
dtype=dtype,
)
self.ssm_states: torch.Tensor = torch.zeros(
config.num_hidden_layers,
self.max_batch_size,
self.intermediate_size,
self.ssm_state_size,
device=device,
dtype=dtype,
)
torch._dynamo.mark_static_address(self.conv_states)
torch._dynamo.mark_static_address(self.ssm_states)
def update_conv_state(
self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = new_conv_state.to(device=conv_state.device, dtype=conv_state.dtype)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
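# Illustrative sketch (toy values, not part of the original source): `roll(shifts=-1, dims=-1)`
# shifts the stored convolutional inputs one step to the left, after which the newest value is
# written into the freed slot.
def _demo_conv_state_roll():
    import torch

    state = torch.tensor([[1.0, 2.0, 3.0, 4.0]])  # (batch=1, conv_kernel_size=4)
    state = state.roll(shifts=-1, dims=-1)        # tensor([[2., 3., 4., 1.]])
    state[:, -1] = 5.0                            # newest input goes into the last position
    return state                                  # tensor([[2., 3., 4., 5.]])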
def update_ssm_state(self, layer_idx: int, new_ssm_state: torch.Tensor):
self.ssm_states[layer_idx] = new_ssm_state.to(self.ssm_states.device)
return self.ssm_states[layer_idx]
def reset(self):
self.conv_states.zero_()
self.ssm_states.zero_()
@property
def batch_size(self):
logger.warning_once(
f"The 'batch_size' attribute of {self.__class__.__name__} is deprecated and will be removed in "
"v4.49. Use the more precisely named 'self.max_batch_size' attribute instead."
)
return self.max_batch_size
class OffloadedStaticCache(StaticCache):
"""
Static cache class to be used with `torch.compile(model)` that offloads to the CPU or
another device.
Args:
config (`PretrainedConfig`):
The configuration file defining the shape-related attributes required to initialize
the static cache.
max_batch_size (`int`):
The maximum batch size with which the model will be used.
max_cache_len (`int`):
The maximum sequence length with which the model will be used.
device (`Union[str, torch.device]`):
The device on which the cache should be initialized. Should be the same as the
layer device.
dtype (`torch.dtype`, *optional*):
The default `dtype` to use when initializing the cache.
offload_device (`Union[str, torch.device]`, *optional*, defaults to `cpu`):
The device to offload to. Defaults to CPU.
layer_device_map (`Dict[int, Union[str, torch.device, int]]`, *optional*):
Mapping between the layers and their devices. This is required when you are manually initializing the cache and the model is split across different GPUs.
You can check which layer is mapped to which device via the associated device map: `model.hf_device_map`.
Attributes:
key_cache (`List[torch.Tensor]`):
Off-loaded key cache tensors. The first one is kept on device, whereas the others are
off-loaded.
value_cache (`List[torch.Tensor]`):
Off-loaded value cache tensors. The first one is kept on device, whereas the others are
off-loaded.
max_batch_size (`int`):
The maximum batch size with which this cache can be used.
max_cache_len (`int`):
The maximum sequence length with which this cache can be used.
device (`torch.device`):
The device on which the cache is used.
offload_device (`torch.device`):
The device used to offload to.
dtype (`torch.dtype`):
The `dtype` used to initialize the cache.
Example:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, OffloadedStaticCache
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> inputs = tokenizer(text="My name is GPT2", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
>>> max_generated_length = inputs.input_ids.shape[1] + 10
>>> past_key_values = OffloadedStaticCache(config=model.config, max_batch_size=1, max_cache_len=max_generated_length, device=model.device, dtype=model.dtype)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> past_kv_length = outputs.past_key_values # access cache filled with key/values from generation
```
"""
def __init__(
self,
config: PretrainedConfig,
max_batch_size: int,
max_cache_len: Optional[int],
device: Union[str, torch.device],
dtype: Optional[torch.dtype] = None,
offload_device: Union[str, torch.device] = torch.device("cpu"),
layer_device_map: Optional[Dict[int, Union[str, torch.device, int]]] = None,
) -> None:
self.max_batch_size = max_batch_size
self.max_cache_len = config.max_position_embeddings if max_cache_len is None else max_cache_len
self.device = torch.device(device) if layer_device_map is None else layer_device_map[0]
self.offload_device = torch.device(offload_device)
self.dtype = dtype if dtype is not None else torch.float32
# Some models define a custom `head_dim` != config.hidden_size // config.num_attention_heads
head_dim = config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
num_key_value_heads = (
config.num_attention_heads
if getattr(config, "num_key_value_heads", None) is None
else config.num_key_value_heads
)
cache_shape = (max_batch_size, num_key_value_heads, self.max_cache_len, head_dim)
# Create offloaded CPU tensors.
self.key_cache: List[torch.Tensor] = []
self.value_cache: List[torch.Tensor] = []
for i in range(config.num_hidden_layers):
# First layer is always on-device.
device = self.device if i == 0 else self.offload_device
key_cache, value_cache = self._create_key_value_cache_tensors(cache_shape, device)
self.key_cache.append(key_cache)
self.value_cache.append(value_cache)
# Create device tensors.
self._device_key_cache: List[torch.Tensor] = []
self._device_value_cache: List[torch.Tensor] = []
for i in range(2):
key_cache, value_cache = self._create_key_value_cache_tensors(cache_shape, self.device)
self._device_key_cache.append(key_cache)
self._device_value_cache.append(value_cache)
# For backwards compatibility.
# TODO(gante): Remove this.
self._seen_tokens = 0
# Create new CUDA stream for parallel prefetching.
self._prefetch_stream = torch.cuda.Stream() if self.device.type == "cuda" else None
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
It is VERY important to index using a tensor, otherwise you introduce a copy to the device.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`Dict[str, Any]`, *optional*):
Additional arguments for the cache subclass. The `OffloadedStaticCache` needs the
`cache_position` input to know where to write in the cache.
Return:
A tuple containing the updated key and value states.
"""
if layer_idx == 0:
# Update seen tokens.
# TODO(gante): Remove this.
self._seen_tokens += key_states.shape[-2]
# Always there.
k_out = self.key_cache[0]
v_out = self.value_cache[0]
else:
# Wait for prefetch stream.
if self._prefetch_stream is not None:
torch.cuda.default_stream(self.device).wait_stream(self._prefetch_stream)
k_out = self._device_key_cache[layer_idx & 1]
v_out = self._device_value_cache[layer_idx & 1]
self._prefetch_layer(layer_idx + 1)
cache_position = cache_kwargs.get("cache_position") if cache_kwargs is not None else None
if cache_position is None:
k_out.copy_(key_states)
v_out.copy_(value_states)
# Copy the values to the offloaded device as well.
if layer_idx == 0:
self.key_cache[layer_idx].copy_(key_states.to(self.offload_device))
self.value_cache[layer_idx].copy_(value_states.to(self.offload_device))
else:
# Note: here we use `tensor.index_copy_(dim, index, tensor)`, which is equivalent to
# `tensor[:, :, index] = tensor`, but the former is compile-friendly and explicitly performs an
# in-place operation, which avoids copies and uses less memory.
try:
k_out.index_copy_(2, cache_position, key_states)
v_out.index_copy_(2, cache_position, value_states)
except NotImplementedError:
# The operator 'aten::index_copy.out' is not currently implemented for the MPS
# device.
k_out[:, :, cache_position] = key_states
v_out[:, :, cache_position] = value_states
# Copy the values to the offloaded device as well.
if layer_idx != 0:
cache_position = cache_position.to(self.offload_device)
key_states = key_states.to(self.offload_device)
value_states = value_states.to(self.offload_device)
try:
self.key_cache[layer_idx].index_copy_(2, cache_position, key_states)
self.value_cache[layer_idx].index_copy_(2, cache_position, value_states)
except NotImplementedError:
# The operator 'aten::index_copy.out' is not currently implemented for the MPS
# device.
self.key_cache[layer_idx][:, :, cache_position] = key_states
self.value_cache[layer_idx][:, :, cache_position] = value_states
return k_out, v_out
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states that were seen by the model."""
# TODO(gante): Remove this.
return self._seen_tokens
def get_max_cache_shape(self) -> Optional[int]:
"""Returns the maximum sequence length of the cached states."""
return self.max_cache_len
def reset(self) -> None:
"""Resets the cache values while preserving the objects."""
# For backwards compatibility.
# TODO(gante): Remove this.
self._seen_tokens = 0
# Zero out cache.
for layer_idx in range(len(self.key_cache)):
# In-place ops prevent breaking the static address.
self.key_cache[layer_idx].zero_()
self.value_cache[layer_idx].zero_()
@property
def seen_tokens(self) -> int:
# For backwards compatibility.
# TODO(gante): Remove this.
return self._seen_tokens
def _create_key_value_cache_tensors(
self, shape: Tuple[int, ...], device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Creates K/V cache tensors on a device. Pins memory for CPU tensors. Marks them as static
addresses for non-CPU tensors.
Args:
shape (`Tuple[int, ...]`): Shape.
device (`torch.device`): Device.
Returns:
Key and value cache tensors as a tuple.
"""
is_cpu_device = device == torch.device("cpu")
key_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device)
value_cache = torch.zeros(shape, dtype=self.dtype, device=device, pin_memory=is_cpu_device)
# Note: `mark_static_address` is used to tag the cache as a fixed data pointer,
# preventing compiled graph breaks when updating the cache.
torch._dynamo.mark_static_address(key_cache)
torch._dynamo.mark_static_address(value_cache)
return key_cache, value_cache
def _prefetch_layer(self, layer_idx: int) -> None:
"""Prefetch a layer to the device. Needs to be called in order of layer indices."""
# Don't fetch layers that do not exist.
if layer_idx >= len(self.key_cache):
return
# Alternate between two on-device caches.
if self._prefetch_stream is not None:
with torch.cuda.stream(self._prefetch_stream):
self._prefetch_layer_in_context(layer_idx)
else:
self._prefetch_layer_in_context(layer_idx)
def _prefetch_layer_in_context(self, layer_idx: int) -> None:
"""Performs the actual copy of the layer to device cache."""
self._device_key_cache[layer_idx & 1].copy_(self.key_cache[layer_idx], non_blocking=True)
self._device_value_cache[layer_idx & 1].copy_(self.value_cache[layer_idx], non_blocking=True)
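# Illustrative sketch (CPU-only fallback when CUDA is unavailable; not part of the original source):
# the prefetch above relies on pinned host memory plus `copy_(..., non_blocking=True)` so the
# host-to-device transfer of layer i + 1 can overlap with the attention computation of layer i.
def _demo_async_copy():
    import torch

    use_cuda = torch.cuda.is_available()
    cpu_cache = torch.zeros(2, 4, 8, pin_memory=use_cuda)  # pinning requires an accelerator
    staging = torch.empty(2, 4, 8, device="cuda" if use_cuda else "cpu")
    staging.copy_(cpu_cache, non_blocking=True)            # asynchronous when pinned and on CUDA
    return staging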
class AttentionMaskConverter:
"""
A utility attention mask class that allows one to:
- Create a causal 4d mask
- Create a causal 4d mask with sliding window
- Convert a 2d attention mask (batch_size, query_length) to a 4d attention mask (batch_size, 1, query_length,
key_value_length) that can be multiplied with attention scores
Examples:
```python
>>> import torch
>>> from transformers.modeling_attn_mask_utils import AttentionMaskConverter
>>> converter = AttentionMaskConverter(True)
>>> converter.to_4d(torch.tensor([[0, 0, 0, 1, 1]]), 5, key_value_length=5, dtype=torch.float32)
tensor([[[[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
[-3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38, -3.4028e+38],
[-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, -3.4028e+38],
[-3.4028e+38, -3.4028e+38, -3.4028e+38, 0.0000e+00, 0.0000e+00]]]])
```
Parameters:
is_causal (`bool`):
Whether the attention mask should be a uni-directional (causal) or bi-directional mask.
sliding_window (`int`, *optional*):
Optionally, sliding window masks can be created if `sliding_window` is set to a positive integer.
"""
is_causal: bool
sliding_window: int
def __init__(self, is_causal: bool, sliding_window: Optional[int] = None):
self.is_causal = is_causal
self.sliding_window = sliding_window
if self.sliding_window is not None and self.sliding_window <= 0:
raise ValueError(
f"Make sure that when passing `sliding_window` that its value is a strictly positive integer, not `{self.sliding_window}`"
)
def to_causal_4d(
self,
batch_size: int,
query_length: int,
key_value_length: int,
dtype: torch.dtype,
device: Union[torch.device, "str"] = "cpu",
) -> Optional[torch.Tensor]:
"""
Creates a causal 4D mask of shape `(bsz, head_dim=1, query_length, key_value_length)` and adds a large negative
bias to the upper right-hand triangle (causal mask).
"""
if not self.is_causal:
raise ValueError(f"Please use `to_causal_4d` only if {self.__class__} has `is_causal` set to True.")
# If shape is not cached, create a new causal mask and cache it
input_shape = (batch_size, query_length)
past_key_values_length = key_value_length - query_length
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
causal_4d_mask = None
if input_shape[-1] > 1 or self.sliding_window is not None:
causal_4d_mask = self._make_causal_mask(
input_shape,
dtype,
device=device,
past_key_values_length=past_key_values_length,
sliding_window=self.sliding_window,
)
return causal_4d_mask
def to_4d(
self,
attention_mask_2d: torch.Tensor,
query_length: int,
dtype: torch.dtype,
key_value_length: Optional[int] = None,
) -> torch.Tensor:
"""
Converts 2D attention mask to 4D attention mask by expanding mask to (bsz, head_dim=1, query_length,
key_value_length) shape and by adding a large negative bias to not-attended positions. If attention_mask is
causal, a causal mask will be added.
"""
input_shape = (attention_mask_2d.shape[0], query_length)
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
causal_4d_mask = None
if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal:
if key_value_length is None:
raise ValueError(
"This attention mask converter is causal. Make sure to pass `key_value_length` to correctly create a causal mask."
)
past_key_values_length = key_value_length - query_length
causal_4d_mask = self._make_causal_mask(
input_shape,
dtype,
device=attention_mask_2d.device,
past_key_values_length=past_key_values_length,
sliding_window=self.sliding_window,
)
elif self.sliding_window is not None:
raise NotImplementedError("Sliding window is currently only implemented for causal masking")
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = self._expand_mask(attention_mask_2d, dtype, tgt_len=input_shape[-1]).to(
attention_mask_2d.device
)
if causal_4d_mask is not None:
expanded_attn_mask = causal_4d_mask.masked_fill(expanded_attn_mask.bool(), torch.finfo(dtype).min)
# expanded_attn_mask + causal_4d_mask can cause some overflow
expanded_4d_mask = expanded_attn_mask
return expanded_4d_mask
@staticmethod
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
sliding_window: Optional[int] = None,
):
"""
Make the causal mask used for uni-directional (causal) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
# add lower triangular sliding window mask if necessary
if sliding_window is not None:
diagonal = past_key_values_length - sliding_window - 1
context_mask = torch.tril(torch.ones_like(mask, dtype=torch.bool), diagonal=diagonal)
# Recent changes in PyTorch prevent mutations on tensors converted with aten::_to_copy
# See https://github.com/pytorch/pytorch/issues/127571
if is_torchdynamo_compiling():
mask = mask.clone()
mask.masked_fill_(context_mask, torch.finfo(dtype).min)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
@staticmethod
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
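# Illustrative sketch (toy input, not part of the original source): a padding mask `[[1, 1, 0]]`
# expands to an additive bias where masked key positions receive the dtype minimum and attended
# positions stay at zero.
def _demo_expand_mask():
    import torch

    bias = AttentionMaskConverter._expand_mask(torch.tensor([[1, 1, 0]]), dtype=torch.float32)
    # bias.shape == (1, 1, 3, 3); bias[..., 2] == torch.finfo(torch.float32).min, the rest is 0.0
    return bias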
@staticmethod
def _unmask_unattended(
expanded_mask: torch.FloatTensor,
min_dtype: float,
):
# fmt: off
"""
Attend to all tokens in masked rows of the expanded attention mask, for example the relevant first rows when
using left padding. This is required by the memory-efficient attention path of `F.scaled_dot_product_attention`.
Details: https://github.com/pytorch/pytorch/issues/110213
`expanded_mask` is [bsz, num_masks, tgt_seq_len, src_seq_len] or [bsz, tgt_seq_len, src_seq_len].
`attention_mask` is [bsz, src_seq_len].
The dimension num_masks of `expanded_mask` is most often 1, but it can also be the number of heads in the case of alibi attention bias.
For example, if `expanded_mask` is (e.g. here left-padding case)
```
[[[[0, 0, 0],
[0, 0, 0],
[0, 0, 1]]],
[[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]],
[[[0, 0, 0],
[0, 1, 0],
[0, 1, 1]]]]
```
then the modified `expanded_mask` will be
```
[[[[1, 1, 1], <-- modified
[1, 1, 1], <-- modified
[0, 0, 1]]],
[[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]],
[[[1, 1, 1], <-- modified
[0, 1, 0],
[0, 1, 1]]]]
```
"""
# fmt: on
if expanded_mask.dtype == torch.bool:
raise ValueError(
"AttentionMaskConverter._unmask_unattended expects a float `expanded_mask`, got a BoolTensor."
)
return expanded_mask.mul(~torch.all(expanded_mask == min_dtype, dim=-1, keepdim=True))
@staticmethod
def _ignore_causal_mask_sdpa(
attention_mask: Optional[torch.Tensor],
inputs_embeds: torch.Tensor,
past_key_values_length: int,
sliding_window: Optional[int] = None,
is_training: bool = False,
) -> bool:
"""
Detects whether the optional user-specified attention_mask and the automatically created causal mask can be
ignored when PyTorch's SDPA is used, relying instead on SDPA's `is_causal` argument.
If no token is masked in the `attention_mask` argument and `query_length == 1` or
`key_value_length == query_length`, we rely on SDPA's `is_causal` argument to select a causal/non-causal mask,
which allows dispatching to the flash attention kernel (that kernel cannot otherwise be used if a custom
`attn_mask` is passed).
"""
_, query_length = inputs_embeds.shape[0], inputs_embeds.shape[1]
key_value_length = query_length + past_key_values_length
is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy) or is_torchdynamo_compiling()
ignore_causal_mask = False
if attention_mask is None:
# TODO: When tracing with TorchDynamo with fullgraph=True, the model is recompiled depending on the input
# shape, thus SDPA's `is_causal` argument is rightfully updated
# (see https://gist.github.com/fxmarty/1313f39037fc1c112508989628c57363). However, when using
# `torch.export` or `torch.onnx.dynamo_export`, we must pass an example input, and `is_causal` behavior is
# hard-coded. If a user exports a model with q_len > 1, the exported model will hard-code `is_causal=True`
# which is in general wrong (see https://github.com/pytorch/pytorch/issues/108108).
# Thus, we only set `ignore_causal_mask = True` if the model is set to training.
#
# Besides, jit.trace cannot handle the `q_len > 1` condition for `is_causal`
# ("TypeError: scaled_dot_product_attention(): argument 'is_causal' must be bool, not Tensor").
if (
(is_training or not is_tracing)
and (query_length == 1 or key_value_length == query_length)
and (sliding_window is None or key_value_length < sliding_window)
):
ignore_causal_mask = True
elif sliding_window is None or key_value_length < sliding_window:
if len(attention_mask.shape) == 4:
return False
elif not is_tracing and torch.all(attention_mask == 1):
if query_length == 1 or key_value_length == query_length:
# For query_length == 1, causal attention and bi-directional attention are the same.
ignore_causal_mask = True
# Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally ignore
# the attention mask, as SDPA causal mask generation may be wrong. We will set `is_causal=False` in
# SDPA and rely on Transformers attention_mask instead, hence not setting it to None here.
# Reference: https://github.com/pytorch/pytorch/issues/108108
# TODO: maybe revisit this with https://github.com/pytorch/pytorch/pull/114823 in PyTorch 2.3.
return ignore_causal_mask
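# Illustrative sketch (toy tensors, not part of the original source): with no user mask, a single
# query token and no sliding window, the helper reports that the explicit causal mask can be
# dropped in favour of SDPA's `is_causal` argument (assuming we are not currently tracing).
def _demo_ignore_causal_mask_sdpa():
    import torch

    dummy_embeds = torch.zeros(1, 1, 8)  # (batch, query_length=1, hidden_size)
    return AttentionMaskConverter._ignore_causal_mask_sdpa(
        attention_mask=None,
        inputs_embeds=dummy_embeds,
        past_key_values_length=3,
    )  # -> True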
class TransposeType(ExplicitEnum):
"""
Possible ways a weight tensor may need to be transposed when converting between TensorFlow and PyTorch checkpoints.
"""
NO = "no"
SIMPLE = "simple"
CONV1D = "conv1d"
CONV2D = "conv2d"
class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
"""
Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
Handles all the shared methods for tokenization and special tokens, as well as methods for
downloading/caching/loading pretrained tokenizers and for adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
vocab_files_names = VOCAB_FILES_NAMES
slow_tokenizer_class: PreTrainedTokenizer = None
def __init__(self, *args, **kwargs):
tokenizer_object = kwargs.pop("tokenizer_object", None)
slow_tokenizer = kwargs.pop("__slow_tokenizer", None)
gguf_file = kwargs.pop("gguf_file", None)
fast_tokenizer_file = kwargs.pop("tokenizer_file", None)
from_slow = kwargs.pop("from_slow", False)
added_tokens_decoder = kwargs.pop("added_tokens_decoder", {})
self.add_prefix_space = kwargs.get("add_prefix_space", False)
if from_slow and slow_tokenizer is None and self.slow_tokenizer_class is None:
raise ValueError(
"Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you "
"have sentencepiece installed."
)
if tokenizer_object is not None:
fast_tokenizer = copy.deepcopy(tokenizer_object)
elif fast_tokenizer_file is not None and not from_slow:
# We have a serialization from tokenizers which lets us directly build the backend
fast_tokenizer = TokenizerFast.from_file(fast_tokenizer_file)
elif slow_tokenizer:
# We need to convert a slow tokenizer to build the backend
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
elif gguf_file is not None:
# We need to convert a slow tokenizer to build the backend
gguf_param = load_gguf_checkpoint(kwargs.get("vocab_file"))
architecture = gguf_param["config"]["model_type"]
tokenizer_dict = gguf_param["tokenizer"]
tokenizer_config = gguf_param["tokenizer_config"]
fast_tokenizer, additional_kwargs = convert_gguf_tokenizer(architecture, tokenizer_dict)
kwargs.update(tokenizer_config)
if len(additional_kwargs) > 0:
kwargs.update(additional_kwargs)
elif self.slow_tokenizer_class is not None and slow_tokenizer is not False:
# We need to create and convert a slow tokenizer to build the backend
slow_tokenizer = self.slow_tokenizer_class(*args, **kwargs)
fast_tokenizer = convert_slow_tokenizer(slow_tokenizer)
elif not slow_tokenizer:
# We tried loading a slow_tokenizer with sentencepiece and failed; try to load it with tiktoken instead
self.vocab_file = kwargs.get("vocab_file", None)
self.additional_special_tokens = kwargs.get("additional_special_tokens", [])
fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True)
slow_tokenizer = None
else:
raise ValueError(
"Couldn't instantiate the backend tokenizer from one of: \n"
"(1) a `tokenizers` library serialization file, \n"
"(2) a slow tokenizer instance to convert or \n"
"(3) an equivalent slow tokenizer class to instantiate and convert. \n"
"You need to have sentencepiece or tiktoken installed to convert a slow tokenizer to a fast one."
)
self._tokenizer = fast_tokenizer
if slow_tokenizer is not None:
kwargs.update(slow_tokenizer.init_kwargs)
self._decode_use_source_tokenizer = False
_truncation = self._tokenizer.truncation
if _truncation is not None:
self._tokenizer.enable_truncation(**_truncation)
kwargs.setdefault("max_length", _truncation["max_length"])
kwargs.setdefault("truncation_side", _truncation["direction"])
kwargs.setdefault("stride", _truncation["stride"])
kwargs.setdefault("truncation_strategy", _truncation["strategy"])
else:
self._tokenizer.no_truncation()
_padding = self._tokenizer.padding
if _padding is not None:
self._tokenizer.enable_padding(**_padding)
kwargs.setdefault("pad_token", _padding["pad_token"])
kwargs.setdefault("pad_token_type_id", _padding["pad_type_id"])
kwargs.setdefault("padding_side", _padding["direction"])
kwargs.setdefault("max_length", _padding["length"])
kwargs.setdefault("pad_to_multiple_of", _padding["pad_to_multiple_of"])
# We call this after having initialized the backend tokenizer because we update it.
super().__init__(**kwargs)
self._tokenizer.encode_special_tokens = self.split_special_tokens
added_tokens_decoder_hash = {hash(repr(token)) for token in self.added_tokens_decoder}
tokens_to_add = [
token
for index, token in sorted(added_tokens_decoder.items(), key=lambda x: x[0])
if hash(repr(token)) not in added_tokens_decoder_hash
]
encoder = list(self.added_tokens_encoder.keys()) + [str(token) for token in tokens_to_add]
# if some of the special tokens are strings, check that we don't already have an equivalent token
tokens_to_add += [
token for token in self.all_special_tokens_extended if token not in encoder and token not in tokens_to_add
]
if len(tokens_to_add) > 0:
tokens = []
special_tokens = self.all_special_tokens
for token in tokens_to_add:
is_special = (
(token.special or str(token) in special_tokens)
if isinstance(token, AddedToken)
else str(token) in special_tokens
)
if isinstance(token, str):
token = AddedToken(token, special=is_special)
else:
token.special = is_special
tokens.append(token)
if tokens:
self.add_tokens(tokens)
try:
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space", self.add_prefix_space) != self.add_prefix_space:
pre_tok_class = getattr(pre_tokenizers_fast, pre_tok_state.pop("type"))
pre_tok_state["add_prefix_space"] = self.add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
except Exception:
# We'll get an error if there is no pre_tokenizer, or if it's a custom pre_tokenizer that can
# not be serialized. In those cases, we just ignore the error as there's no pre_tokenizer
# for which we need to update the `add_prefix_space` attribute.
pass
@property
def is_fast(self) -> bool:
return True
@property
def can_save_slow_tokenizer(self) -> bool:
"""
`bool`: Whether or not the slow tokenizer can be saved. Usually for sentencepiece based slow tokenizer, this
can only be `True` if the original `"sentencepiece.model"` was not deleted.
"""
return True
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
return self._tokenizer.get_vocab_size(with_added_tokens=False)
def get_vocab(self) -> Dict[str, int]:
return self._tokenizer.get_vocab(with_added_tokens=True)
@property
def vocab(self) -> Dict[str, int]:
return self.get_vocab()
@property
def added_tokens_encoder(self) -> Dict[str, int]:
"""
Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
optimisation in `self._added_tokens_encoder` for the slow tokenizers.
"""
return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}
@property
def added_tokens_decoder(self) -> Dict[int, AddedToken]:
"""
Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.
Returns:
`Dict[int, AddedToken]`: The added tokens, indexed by their position in the vocabulary.
"""
return self._tokenizer.get_added_tokens_decoder()
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int]`: The added tokens.
"""
return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}
def __len__(self) -> int:
"""
Size of the full vocabulary with the added tokens.
"""
return self._tokenizer.get_vocab_size(with_added_tokens=True)
@property
def backend_tokenizer(self) -> TokenizerFast:
"""
`tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend.
"""
return self._tokenizer
@property
def decoder(self) -> DecoderFast:
"""
`tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer.
"""
return self._tokenizer.decoder
def _convert_encoding(
self,
encoding: EncodingFast,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> Tuple[Dict[str, Any], List[EncodingFast]]:
"""
Convert the encoding representation (from the low-level HuggingFace tokenizer output) to a Python dict and a list
of encodings, taking care of building a batch from overflowing tokens.
Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
lists (overflows) of lists (tokens).
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_length:
encoding_dict["length"].append(len(e.ids))
return encoding_dict, encodings
def convert_tokens_to_ids(self, tokens: Union[str, Iterable[str]]) -> Union[int, List[int]]:
"""
Converts a token string (or a sequence of tokens) to a single integer id (or an iterable of ids), using the
vocabulary.
Args:
tokens (`str` or `Iterable[str]`): One or several token(s) to convert to token id(s).
Returns:
`int` or `List[int]`: The token id or list of token ids.
"""
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
return [self._convert_token_to_id_with_added_voc(token) for token in tokens]
def _convert_token_to_id_with_added_voc(self, token: str) -> int:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
def _convert_id_to_token(self, index: int) -> Optional[str]:
return self._tokenizer.id_to_token(int(index))
def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int:
if special_tokens:
return self._tokenizer.add_special_tokens(new_tokens)
return self._tokenizer.add_tokens(new_tokens)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
return self._tokenizer.num_special_tokens_to_add(pair)
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices to a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
return self._tokenizer.id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
tokens.append(self._tokenizer.id_to_token(index))
return tokens
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens()
def set_truncation_and_padding(
self,
padding_strategy: PaddingStrategy,
truncation_strategy: TruncationStrategy,
max_length: int,
stride: int,
pad_to_multiple_of: Optional[int],
padding_side: Optional[str],
):
"""
Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
library) and restore the tokenizer settings afterwards.
The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a
padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed
section.
Args:
padding_strategy ([`~utils.PaddingStrategy`]):
The kind of padding that will be applied to the input
truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]):
The kind of truncation that will be applied to the input
max_length (`int`):
The maximum size of a sequence.
stride (`int`):
The stride to use when handling overflow.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
"""
_truncation = self._tokenizer.truncation
_padding = self._tokenizer.padding
# Set truncation and padding on the backend tokenizer
if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE:
if _truncation is not None:
self._tokenizer.no_truncation()
else:
target = {
"max_length": max_length,
"stride": stride,
"strategy": truncation_strategy.value,
"direction": self.truncation_side,
}
# _truncation might contain more keys than the target `transformers`
# supports. Use only the target keys to trigger `enable_truncation`.
# This should enable this code to work with various `tokenizers`
# versions.
if _truncation is None:
current = None
else:
current = {k: _truncation.get(k, None) for k in target}
if current != target:
self._tokenizer.enable_truncation(**target)
if padding_strategy == PaddingStrategy.DO_NOT_PAD:
if _padding is not None:
self._tokenizer.no_padding()
else:
length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None
target = {
"length": length,
"direction": padding_side if padding_side is not None else self.padding_side,
"pad_id": self.pad_token_id,
"pad_token": self.pad_token,
"pad_type_id": self.pad_token_type_id,
"pad_to_multiple_of": pad_to_multiple_of,
}
if _padding != target:
self._tokenizer.enable_padding(**target)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
split_special_tokens: bool = False,
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, (tuple, list)):
raise TypeError(
f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})"
)
# Set the truncation and padding strategy and restore the initial configuration
self.set_truncation_and_padding(
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
)
if self._tokenizer.encode_special_tokens != split_special_tokens:
self._tokenizer.encode_special_tokens = split_special_tokens
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs,
add_special_tokens=add_special_tokens,
is_pretokenized=is_split_into_words,
)
# Convert encoding to dict
# `Tokens` has type: Tuple[
# List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
# List[EncodingFast]
# ]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens_and_encodings = [
self._convert_encoding(
encoding=encoding,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
)
for encoding in encodings
]
# Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
# From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
# (we say ~ because the number of overflow varies with the example in the batch)
#
# To match each overflowing sample with the original sample in the batch
# we add an overflow_to_sample_mapping array (see below)
sanitized_tokens = {}
for key in tokens_and_encodings[0][0].keys():
stack = [e for item, _ in tokens_and_encodings for e in item[key]]
sanitized_tokens[key] = stack
sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, (toks, _) in enumerate(tokens_and_encodings):
overflow_to_sample_mapping += [i] * len(toks["input_ids"])
sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
for input_ids in sanitized_tokens["input_ids"]:
self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
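# Illustrative sketch (plain Python lists, not part of the original source): how the overflow
# flattening above works. Per-sample overflow rows are concatenated along the batch dimension, and
# `overflow_to_sample_mapping` records which original sample each flattened row came from.
def _demo_overflow_to_sample_mapping():
    tokens_and_encodings = [
        ({"input_ids": [[1, 2], [3, 4]]}, None),  # sample 0 overflowed into two rows
        ({"input_ids": [[5, 6]]}, None),          # sample 1 produced a single row
    ]
    stack = [e for item, _ in tokens_and_encodings for e in item["input_ids"]]
    mapping = []
    for i, (toks, _) in enumerate(tokens_and_encodings):
        mapping += [i] * len(toks["input_ids"])
    return stack, mapping  # ([[1, 2], [3, 4], [5, 6]], [0, 0, 1])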
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
split_special_tokens: bool = False,
**kwargs,
) -> BatchEncoding:
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
batched_input = [(text, text_pair)] if text_pair else [text]
batched_output = self._batch_encode_plus(
batched_input,
is_split_into_words=is_split_into_words,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
split_special_tokens=split_special_tokens,
**kwargs,
)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
# If return_tensors is None, we can remove the leading batch axis.
# Overflowing tokens are returned as a batch of outputs, so we keep the batch axis in that case.
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: (value[0] if len(value) > 0 and isinstance(value[0], list) else value)
for key, value in batched_output.items()
},
batched_output.encodings,
)
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
return batched_output
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return (
self.backend_tokenizer.decoder.decode(tokens)
if self.backend_tokenizer.decoder is not None
else " ".join(tokens)
)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
def _decode(
self,
token_ids: Union[int, List[int]],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
if isinstance(token_ids, int):
token_ids = [token_ids]
text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
clean_up_tokenization_spaces = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
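A hedged usage sketch of the cleanup branch above, going through the public `decode` API; the checkpoint is only an example and the exact output strings are indicative.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # example checkpoint
ids = tokenizer("Hello , world !", add_special_tokens=False)["input_ids"]

# With cleanup, stray spaces before punctuation are collapsed by clean_up_tokenization.
print(tokenizer.decode(ids, clean_up_tokenization_spaces=True))   # e.g. "hello, world!"
# Without cleanup, the backend decoder output is returned unchanged.
print(tokenizer.decode(ids, clean_up_tokenization_spaces=False))  # e.g. "hello , world !"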
def _save_pretrained(
self,
save_directory: Union[str, os.PathLike],
file_names: Tuple[str],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
) -> Tuple[str]:
"""
Save a tokenizer either in the slow-tokenizer/legacy format (vocabulary + added-tokens files), in the unified
fast format (a single JSON file containing config + vocab + added tokens), or in both, depending on `legacy_format`.
"""
save_directory = str(save_directory)
if self.slow_tokenizer_class is None and legacy_format is True:
raise ValueError(
"Your tokenizer does not have a legacy version defined and therefore cannot register this version. You"
" might consider leaving the legacy_format at `None` or setting it to `False`."
)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
save_slow = (
(legacy_format is None or legacy_format is True)
and self.slow_tokenizer_class is not None
and self.can_save_slow_tokenizer
)
save_fast = legacy_format is None or legacy_format is False
if save_slow:
added_tokens_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
)
# make sure to be forward compatible
added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
f.write(out_str)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
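A hedged sketch of how the `save_slow` / `save_fast` decision above is typically driven from the public `save_pretrained` API; the checkpoint and directory names are placeholders.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # example checkpoint

# legacy_format=None (default): save the slow files (vocab + added tokens) *and*
# the single tokenizer.json, when a slow tokenizer class is available.
tokenizer.save_pretrained("./tok-both")

# legacy_format=False: only the fast tokenizer.json written by `backend_tokenizer.save`.
tokenizer.save_pretrained("./tok-fast-only", legacy_format=False)

# legacy_format=True: only the slow/legacy files; raises if no slow class is defined.
tokenizer.save_pretrained("./tok-slow-only", legacy_format=True)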
vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
file_names = file_names + vocab_files + (added_tokens_file,)
if save_fast:
tokenizer_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE
)
self.backend_tokenizer.save(tokenizer_file)
file_names = file_names + (tokenizer_file,)
return file_names
def train_new_from_iterator(
self,
text_iterator,
vocab_size,
length=None,
new_special_tokens=None,
special_tokens_map=None,
**kwargs,
):
"""
Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline)
as the current one.
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
Args:
text_iterator (generator of `List[str]`):
The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts
if you have everything in memory.
vocab_size (`int`):
The size of the vocabulary you want for your tokenizer.
length (`int`, *optional*):
The total number of sequences in the iterator. This is used to provide meaningful progress tracking.
new_special_tokens (list of `str` or `AddedToken`, *optional*):
A list of new special tokens to add to the tokenizer you are training.
special_tokens_map (`Dict[str, str]`, *optional*):
If you want to rename some of the special tokens this tokenizer uses, pass along a mapping from old special
token name to new special token name in this argument.
kwargs (`Dict[str, Any]`, *optional*):
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library.
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
Returns:
[`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on
`text_iterator`.
"""
tokenizer_json = json.loads(self._tokenizer.to_str())
# Remove added tokens for now (uses IDs of tokens)
added_tokens = tokenizer_json.pop("added_tokens")
# Remove post processor for now (uses IDs of tokens)
post_processor = tokenizer_json.pop("post_processor")
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
unk_token = None
# Remove vocab
if tokenizer_json["model"]["type"] == "BPE":
tokenizer_json["model"]["vocab"] = {}
tokenizer_json["model"]["merges"] = []
elif tokenizer_json["model"]["type"] == "Unigram":
if tokenizer_json["model"]["unk_id"] is not None:
unk_id = tokenizer_json["model"]["unk_id"]
unk_token = tokenizer_json["model"]["vocab"][unk_id][0]
if special_tokens_map is not None and unk_token in special_tokens_map:
unk_token = special_tokens_map[unk_token]
tokenizer_json["model"]["unk_id"] = 0
tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]]
elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]:
tokenizer_json["model"]["vocab"] = {}
else:
raise ValueError(
f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) "
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
"only BPE, Unigram, WordLevel and WordPiece."
)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
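For reference, a hedged illustration of what the reset tokenizer_json["model"] sections look like for each supported model type; field names follow the 🤗 Tokenizers serialization format, and the exact contents vary per tokenizer.
# BPE: vocabulary and merge rules are emptied so the trainer can rebuild them.
bpe_model = {"type": "BPE", "vocab": {}, "merges": []}

# Unigram: only the unknown token survives, re-indexed to id 0 with a dummy score.
unigram_model = {"type": "Unigram", "unk_id": 0, "vocab": [["<unk>", 0.0]]}

# WordLevel / WordPiece: the vocabulary is simply cleared.
wordpiece_model = {"type": "WordPiece", "vocab": {}, "unk_token": "[UNK]"}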
if (
special_tokens_map is not None
and "unk_token" in tokenizer_json["model"]
and tokenizer_json["model"]["unk_token"] in special_tokens_map
):
tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]]
tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json))
# Get the special tokens from the current tokenizer if none are specified.
special_tokens = []
for added_token in added_tokens:
special = added_token.pop("special", None)
_ = added_token.pop("id", None)
if tokenizer_json["model"]["type"] != "Unigram" and not special:
continue
if special_tokens_map is not None and added_token["content"] in special_tokens_map:
added_token["content"] = special_tokens_map[added_token["content"]]
special_tokens.append(AddedToken(**added_token))
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
if new_special_tokens is not None:
special_tokens.extend(new_special_tokens)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
# The trainer needs to know the end-of-word suffix and continuing-subword prefix used by BPE
if (
tokenizer_json["model"]["type"] == "BPE"
and "continuing_subword_prefix" not in kwargs
and tokenizer_json["model"]["continuing_subword_prefix"] is not None
):
kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"]
if (
tokenizer_json["model"]["type"] == "BPE"
and "end_of_word_suffix" not in kwargs
and tokenizer_json["model"]["end_of_word_suffix"] is not None
):
kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"]
if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None:
kwargs["unk_token"] = unk_token
if tokenizer_json["pre_tokenizer"] is not None:
if (
tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel"
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
or tokenizer_json["pre_tokenizer"]["type"] == "Sequence"
and "pretokenizers" in tokenizer_json["pre_tokenizer"]
and any(
pretokenizer["type"] == "ByteLevel"
for pretokenizer in tokenizer_json["pre_tokenizer"]["pretokenizers"]
)
):
kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet()
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]]
trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs)
tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
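The trainer lookup and `train_from_iterator` call above correspond roughly to the following standalone 🤗 Tokenizers usage; this is a hedged sketch with a tiny in-memory corpus standing in for the real `text_iterator`.
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Assumption: two small batches of raw text replace a real training corpus.
corpus = [["low lower lowest", "new newer newest"], ["wide wider widest"]]

tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()

trainer = trainers.BpeTrainer(vocab_size=200, special_tokens=["<unk>", "<pad>"])
tokenizer.train_from_iterator((batch for batch in corpus), trainer=trainer)

print(tokenizer.get_vocab_size())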
if post_processor is not None:
trained_tokenizer_json = json.loads(tokenizer.to_str())
# Almost done, we just have to adjust the token IDs in the post processor
if "special_tokens" in post_processor:
for key in post_processor["special_tokens"]:
tokens = post_processor["special_tokens"][key]["tokens"]
if special_tokens_map is not None:
tokens = [special_tokens_map.get(token, token) for token in tokens]
post_processor["special_tokens"][key]["tokens"] = tokens
for token in tokens:
token_id = tokenizer.token_to_id(token)
if token_id is None:
raise ValueError(
"Attempted to set a token in the post processor that does not exist in the mapping"
)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens]
for special_token in ["cls", "sep"]:
if special_token in post_processor:
token, _ = post_processor[special_token]
if special_tokens_map is not None and token in special_tokens_map:
token = special_tokens_map[token]
token_id = tokenizer.token_to_id(token)
if token_id is None:
raise ValueError(
"Attempted to set a token in the post processor that does not exist in the mapping"
)
post_processor[special_token] = [token, token_id]
trained_tokenizer_json["post_processor"] = post_processor
tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json))
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
kwargs = self.init_kwargs.copy()
# Map pad/cls/mask token at the Transformers level
special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
special_tokens_list.remove("additional_special_tokens")
for token in special_tokens_list:
if getattr(self, token) is not None:
special_token = getattr(self, token)
if special_tokens_map is not None and special_token in special_tokens_map:
special_token = special_tokens_map[special_token]
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
special_token_full = self._special_tokens_map.get(token, None)
if isinstance(special_token_full, AddedToken):
# Create an added token with the same parameters except the content
kwargs[token] = AddedToken(
special_token,
single_word=special_token_full.single_word,
lstrip=special_token_full.lstrip,
rstrip=special_token_full.rstrip,
normalized=special_token_full.normalized,
special=True,
)
else:
kwargs[token] = special_token
additional_special_tokens = self.additional_special_tokens
if new_special_tokens is not None:
additional_special_tokens.extend(new_special_tokens)
if len(additional_special_tokens) > 0:
kwargs["additional_special_tokens"] = additional_special_tokens
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
return self.__class__(tokenizer_object=tokenizer, **kwargs)
| 228 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/tokenization_utils_fast.py
|
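A hedged end-to-end usage sketch of `train_new_from_iterator`; the checkpoint name, corpus, and target vocabulary size are illustrative assumptions.
from transformers import AutoTokenizer

old_tokenizer = AutoTokenizer.from_pretrained("gpt2")  # example fast tokenizer

corpus = [
    ["def add(a, b):", "    return a + b"],
    ["class Point:", "    pass"],
]

def batch_iterator():
    # Yield batches of raw texts, as expected by `text_iterator`.
    for batch in corpus:
        yield batch

new_tokenizer = old_tokenizer.train_new_from_iterator(batch_iterator(), vocab_size=1000)
new_tokenizer.save_pretrained("./my-new-tokenizer")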
class ModuleUtilsMixin:
"""
A few utilities for `torch.nn.Modules`, to be used as a mixin.
"""
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except ImportError:
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
| 229 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
|
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero
with `model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
| 229 |
/Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/modeling_utils.py
|
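A hedged usage sketch of the memory hooks above; it requires `psutil`, and the checkpoint and input IDs are illustrative assumptions.
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-uncased")  # example checkpoint
model.add_memory_hooks()

with torch.no_grad():
    model(torch.tensor([[101, 2023, 2003, 1037, 3231, 102]]))

# Every sub-module (and the model itself) now carries the RSS increase, in bytes,
# accumulated over its forward passes.
print(model.mem_rss_diff)
model.reset_memory_hooks_state()  # zero the counters before the next measurement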