Modify modeling_minicpm.py to use LSE compression
modeling_minicpm.py  (+373 −599, CHANGED)
@@ -21,10 +21,10 @@ from typing import Any, Dict, List, Optional, Tuple, Union
  import torch
  import torch.nn.functional as F
  import torch.utils.checkpoint
- from torch import nn
  from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
  from transformers.activations import ACT2FN
- from transformers.cache_utils import Cache, DynamicCache
  from transformers.modeling_attn_mask_utils import (
      AttentionMaskConverter,
      _prepare_4d_attention_mask,
@@ -47,7 +47,9 @@ from transformers.utils import (
  )
  from transformers.utils.import_utils import is_torch_fx_available

- ...

  try:
      from flash_attn import flash_attn_func, flash_attn_varlen_func

@@ -57,6 +59,7 @@ try:
      infllmv2_attn_varlen_func,
      infllmv2_attn_with_kvcache,
      max_pooling_1d,
  )
  except:
      pass
@@ -67,87 +70,71 @@ from functools import lru_cache
  def compressed_attention(
      q: torch.Tensor,
      k: torch.Tensor,
-     v: torch.Tensor,
      kernel_size: int,
      kernel_stride: int,
      block_size: int,
      topk: int,
      cu_seqlens_q: torch.Tensor,
      cu_seqlens_k: torch.Tensor,
      max_seqlen_q: int,
      max_seqlen_k: int,
      sm_scale: float = None,
      init_blocks: int = 1,
      local_blocks: int = 2,
-     ...
-     total_seq_lens=-1,
  ) -> Tuple[torch.Tensor, torch.Tensor]:
-     """Attention between query and compressed key and value. Compute attention output and topk block idx used in topk_sparse_attention.
-
-     Args:
-         q (torch.Tensor): shape [total_q_len, num_q_heads, head_dim]
-         k (torch.Tensor): shape [total_kv_len, num_kv_heads, head_dim]
-         v (torch.Tensor): shape [total_kv_len, num_kv_heads, head_dim]
-         kernel_size (int): kernel size in compress_key_value
-         kernel_stride (int): stride of compress_key_value
-         block_size (int): key value block size for topk sparse attention.
-         topk (int): number of blocks for each query.
-         cu_seqlens_q (torch.Tensor): shape [batch_size + 1], similar to cu_seqlens_q in flash_attn_func_varlen.
-         cu_seqlens_k (torch.Tensor): shape [batch_size + 1], similar to cu_seqlens_k in flash_attn_func_varlen.
-         max_seqlen_q (int): max q len of the batch.
-         max_seqlen_k (int): max k len of the batch.
-         sm_scale (float, optional): softmax scale. Defaults to None, meaning 1/sqrt(head_dim).
-         init_blocks (int, optional): number of init blocks for each query. Defaults to 1.
-         local_blocks (int, optional): number of local blocks for each query. Defaults to 2.
-         parallel_topk_compute (str, optional): only set it to False when the sequence length is too long; this can avoid a current bug.
-             We'll fix this issue later. Defaults to 'auto': it is set to False when the sequence length is greater than 32k and True otherwise.
-
-     Returns:
-         Tuple[torch.Tensor, torch.Tensor]: attention output and topk_idx used in topk_sparse_attention
-     """
      with torch.no_grad():
-         cache_len = 0
          batch_size = cu_seqlens_q.shape[0] - 1
-         ...
-         q_idx = ...
-         ...
          score = infllmv2_attn_stage1(
              q.contiguous(),
              k.contiguous(),
-             ...
              cu_seqlens_q=cu_seqlens_q,
              cu_seqlens_k=cu_seqlens_k,
              max_seqlen_q=max_seqlen_q,
              max_seqlen_k=max_seqlen_k,
-             causal=...
-         ...
-         block_score = ...
              score.contiguous(),
-             ...
              local_blocks=local_blocks,
              init_blocks=init_blocks,
              block_size=block_size,
-             stride=kernel_stride
-         )
          # get topk
          topk = min(topk, block_score.shape[-1])
          topk_idx = block_score.topk(topk, dim=-1).indices.sort(-1).values
-         topk_idx[topk_idx ...
          topk_idx = topk_idx.to(torch.int32)

          return topk_idx
@@ -246,299 +233,133 @@ class CompressK(torch.nn.Module):
          return compressed_k, cu_seqlens_compressed


- class DynamicCacheQKV(DynamicCache):
-     """
-     A cache that grows dynamically as more tokens are generated. This is the default for generative models.
-
-     It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
-     `[batch_size, num_heads, seq_len, head_dim]`.
-
-     Example:
-         ```python
-         >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
-         ...
-         >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")
-
-         >>> # Prepare a cache class and pass it to model's forward
-         >>> past_key_values = DynamicCache()
-         >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
-         >>> outputs.past_key_values # access cache filled with key/values from generation
-         DynamicCache()
-         ```
-     """
-     def __init__(self, num_hidden_layers: Optional[int] = None) -> None:
          super().__init__()
-         ...
      else:
-         self. ...
-         ...
-         Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
-         sequence length.
-         """
-         if layer_idx < len(self):
-             return (self.key_cache[layer_idx], self.value_cache[layer_idx])
          else:
-             ...
-     def ...
-     ...
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """
-         Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.

-         ...
-             key_states (`torch.Tensor`):
-                 The new key states to cache.
-             value_states (`torch.Tensor`):
-                 The new value states to cache.
-             layer_idx (`int`):
-                 The index of the layer to cache the states for.
-             cache_kwargs (`Dict[str, Any]`, `optional`):
-                 Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.

-         Return:
-             A tuple containing the updated key and value states.
-         """
-         # Update the number of seen tokens
          if layer_idx == 0:
              self._seen_tokens += key_states.shape[-2]

-         ...
-             self.key_cache.append(key_states)
-             self.value_cache.append(value_states)

-         ...
-         elif self.key_cache[layer_idx] == []:
-             self.key_cache[layer_idx] = key_states
-             self.value_cache[layer_idx] = value_states

-         ...
-             self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
-             return self.key_cache[layer_idx], self.value_cache[layer_idx]
-
-     def update_no_rope_key(
-         self,
-         key_states: torch.Tensor,
-         layer_idx: int,
-         cache_kwargs: Optional[Dict[str, Any]] = None):
-
-         # Update the cache
-         if len(self.no_rope_key_cache) <= layer_idx:
-             self.no_rope_key_cache.append(key_states)
-
-         # content on layer cache can be a tensor and checking not tensor causes errors
-         # so we explicitly check for the empty list
-         elif self.no_rope_key_cache[layer_idx] == []:
-             self.no_rope_key_cache[layer_idx] = key_states
-         else:
-             self.no_rope_key_cache[layer_idx] = torch.cat([self.no_rope_key_cache[layer_idx], key_states], dim=1)
-         return self.no_rope_key_cache[layer_idx]

-     def ...
-         self,
-         key_states: torch.Tensor,
-         layer_idx: int,
-         cache_kwargs: Optional[Dict[str, Any]] = None
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """
-         Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.

-         ...
-                 The new key states to cache.
-             value_states (`torch.Tensor`):
-                 The new value states to cache.
-             layer_idx (`int`):
-                 The index of the layer to cache the states for.
-             cache_kwargs (`Dict[str, Any]`, `optional`):
-                 Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.

-         Return:
-             A tuple containing the updated key and value states.
-         """

-         ...
-             self.compress_k_cache[layer_idx] = key_states
-         else:
-             self.compress_k_cache[layer_idx] = torch.cat([self.compress_k_cache[layer_idx], key_states], dim=0)
-         return self.compress_k_cache[layer_idx]

-     def ...
-         self ...
-         ...
-         layer_idx: int,
-         kernel_size: int = 32,
-         kernel_stride: int = 16,
-         cache_kwargs: Optional[Dict[str, Any]] = None
-     ) -> Tuple[torch.Tensor, torch.Tensor]:
-         """
-         Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
-
-         Parameters:
-             key_states (`torch.Tensor`):
-                 The new key states to cache.
-             value_states (`torch.Tensor`):
-                 The new value states to cache.
-             layer_idx (`int`):
-                 The index of the layer to cache the states for.
-             cache_kwargs (`Dict[str, Any]`, `optional`):
-                 Additional arguments for the cache subclass. No additional arguments are used in `DynamicCache`.
-
-         Return:
-             A tuple containing the updated key and value states.
-         """
-         # Update the cache
-         if len(self.no_compress_k_cache) <= layer_idx:
-             self.no_compress_k_cache.append(key_states)
-
-         # content on layer cache can be a tensor and checking not tensor causes errors
-         # so we explicitly check for the empty list
-         elif self.no_compress_k_cache[layer_idx] == []:
-             self.no_compress_k_cache[layer_idx] = key_states
-         else:
-             self.no_compress_k_cache[layer_idx] = torch.cat([self.no_compress_k_cache[layer_idx], key_states], dim=0)
-
-         current_len = self.no_compress_k_cache[layer_idx].shape[0]
-
-         if current_len >= kernel_size:
-             k_chunk = self.no_compress_k_cache[layer_idx][:kernel_size]
-             self.no_compress_k_cache[layer_idx] = self.no_compress_k_cache[layer_idx][kernel_stride:]
-             return k_chunk
-         else:
-             return None
-
-     def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
-         """Returns the sequence length of the cached states. A layer index can be optionally passed."""
-         # TODO: deprecate this function in favor of `cache_position`
-         if len(self.key_cache) <= layer_idx or (len(self.key_cache) > layer_idx and self.key_cache[layer_idx] == []):
-             return 0
-         return self.key_cache[layer_idx].shape[-2]
-
-     def get_max_length(self) -> Optional[int]:
-         """Returns the maximum sequence length of the cached states. DynamicCache does not have a maximum length."""
-         return None
-
-     def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
-         """Converts the `DynamicCache` instance into its equivalent in the legacy cache format. Used for
-         backward compatibility."""
-         legacy_cache = ()
-         for layer_idx in range(len(self)):
-             legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx]),)
-         return legacy_cache
-
-     # @classmethod
-     # def from_legacy_cache(
-     #     cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_hidden_layers: int = None
-     # ) -> "DynamicCacheQKV":
-     #     """Converts a cache in the legacy cache format into an equivalent `DynamicCache`. Used for
-     #     backward compatibility."""
-     #     cache = cls(num_hidden_layers)
-     #     if past_key_values is not None:
-     #         for layer_idx in range(len(past_key_values)):
-     #             key_states, value_states, query_status = past_key_values[layer_idx]
-     #             cache.update(key_states, value_states, query_status, layer_idx)
-     #     return cache
-
-     def crop(self, max_length: int):
-         """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
-         negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
-         # In case it is negative
-         if max_length < 0:
-             max_length = self.get_seq_length() - abs(max_length)
-
-         if self.get_seq_length() <= max_length:
-             return
-
-         self._seen_tokens = max_length
-         for idx in range(len(self.key_cache)):
-             if self.key_cache[idx] != []:
-                 self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
-                 self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
-
-     def batch_split(self, full_batch_size: int, split_size: int, num_hidden_layers: int) -> List['DynamicCacheQKV']:
-         """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
-         `_split_model_inputs()` in `generation.utils`"""
-         out = []
-         for i in range(0, full_batch_size, split_size):
-             current_split = DynamicCacheQKV(num_hidden_layers)
-             current_split._seen_tokens = self._seen_tokens
-             current_split.key_cache = [tensor[i: i + split_size] for tensor in self.key_cache]
-             current_split.value_cache = [tensor[i: i + split_size] for tensor in self.value_cache]
-             out.append(current_split)
-         return out
-
-     @classmethod
-     def from_batch_splits(cls, splits: List['DynamicCacheQKV'], num_hidden_layers: int) -> 'DynamicCacheQKV':
-         """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
-         `generation.utils`"""
-         cache = cls(num_hidden_layers)
-         for idx in range(len(splits[0])):
-             key_cache = [current.key_cache[idx] for current in splits if current.key_cache[idx] != []]
-             value_cache = [current.key_cache[idx] for current in splits if current.key_cache[idx] != []]
-             query_cache = [current.key_cache[idx] for current in splits if current.key_cache[idx] != []]
-             if key_cache != []:
-                 layer_keys = torch.cat(key_cache, dim=0)
-                 layer_values = torch.cat(value_cache, dim=0)
-                 layer_query = torch.cat(query_cache, dim=0)
-                 cache.update(layer_keys, layer_values, idx, query_states=layer_query)
-         return cache
-
-     def batch_repeat_interleave(self, repeats: int):
-         """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
-         for layer_idx in range(len(self)):
-             self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
-             self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
-
-     def batch_select_indices(self, indices: torch.Tensor):
-         """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
-         for layer_idx in range(len(self)):
-             self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
-             self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]


  # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
@@ -567,22 +388,6 @@ def _get_unpad_data(attention_mask):
      )


- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
-     warnings.warn(
-         'Calling `transformers.models.minicpm.modeling_minicpm._prepare_4d_attention_mask` is deprecated and will be removed in v4.37. Use `transformers.modeling_attn_mask_utils._prepare_4d_attention_mask'
-     )
-     return _prepare_4d_attention_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)
-
-
- def _make_causal_mask(
-     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
- ):
-     warnings.warn(
-         'Calling `transformers.models.minicpm.modeling_minicpm._make_causal_mask` is deprecated and will be removed in v4.37. Use `transformers.models.minicpm.modeling_minicpm.AttentionMaskConverter._make_causal_mask'
-     )
-     return AttentionMaskConverter._make_causal_mask(
-         input_ids_shape=input_ids_shape, dtype=dtype, device=device, past_key_values_length=past_key_values_length
-     )


  # @torch.jit.script # type: ignore
@@ -796,7 +601,21 @@ class MiniCPMMLP(nn.Module):

      return down_proj

- ...
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
      """
      This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
@@ -927,15 +746,7 @@ class MiniCPMAttention(nn.Module):
          key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
          value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             if self.layer_idx is None:
-                 raise ValueError(
-                     f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
-                     'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
-                     'with a layer index.'
-                 )
-             kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
          cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)

          query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
@@ -1037,9 +848,7 @@ class MiniCPMFlashAttention2(MiniCPMAttention):
          key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
          value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
          cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
          query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
@@ -1211,9 +1020,11 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
          self.dense_len = self.config.sparse_config.get('dense_len', 8192)

          self.local_blocks = self.window_size // self.block_size  # local_blocks
-         self.topk = self.config.sparse_config.get('topk', 64)
          self.use_nope = self.config.sparse_config.get('use_nope', False)
          self.compress_k = CompressK(self.num_key_value_heads, self.head_dim, kernel_size=self.kernel_size, kernel_stride=self.kernel_stride)

      def forward(
          self,
@@ -1237,7 +1048,7 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
          output_attentions = False

          bsz, q_len, _ = hidden_states.size()
-         ...

          query_states = self.q_proj(hidden_states)
          key_states = self.k_proj(hidden_states)
@@ -1255,9 +1066,7 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
          key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
          value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
          cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
          query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
@@ -1271,12 +1080,12 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
          key_states = key_states.transpose(1, 2)
          value_states = value_states.transpose(1, 2)
          if self.use_nope:
              no_rope_param = {
                  'key_states_no_rope': key_states_no_rope,
                  'query_states_no_rope': query_states_no_rope,
              }
-             ...
-             past_key_value.update_no_rope_key(key_states_no_rope, self.layer_idx)
          else:
              no_rope_param = None
@@ -1308,15 +1117,11 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
          if kv_seq_len < self.dense_len:
              attn_output = self._flash_attention_forward_dense(
                  query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)
-         ...
-             attn_output = self. ...
                  query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate,
                  no_rope_param=no_rope_param,  # if past_key_value is not None else None,
                  past_key_value=past_key_value)
-         else:
-             attn_output = self._flash_attention_forward_with_kv_cache(
-                 query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate, no_rope_param=no_rope_param, past_key_value=past_key_value)

          attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
          attn_output = self.o_proj(attn_output)
@@ -1325,123 +1130,180 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):

          return attn_output, attn_weights, past_key_value

-     def ...
-     ...
-     ):
          """
-         ...
          Args:
-             ...
-                 The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
-                 position of padding tokens and 1 for the position of non-padding tokens.
-             dropout (`int`, *optional*):
-                 Attention dropout
-             softmax_scale (`float`, *optional*):
-                 The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
          """
-         ...
-         else:
-             # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
-             causal = self.is_causal and query_length != 1
-         # Contains at least one padding token in the sequence
-         if attention_mask is not None:
-             ...
-             batch_size = query_states.shape[0]

-         ...
          else:
-             ...
      def sparse_forward(self,
                         query_layer,
                         key_layer,
@@ -1451,37 +1313,32 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
                         max_seqlen_in_batch_q,
                         max_seqlen_in_batch_k,
                         no_rope_param=None,
-                        ...
-         ...
-         compressed_k, compressed_cu_seqlens = self.compress_k(stage1_k, cu_seqlens_k)
-         compressed_v = compressed_k.clone()
-         if past_key_value is not None:
-             # Compute the start indices of keys (k) that were not compressed, Only batch_size=1 is supported at the moment.
-             no_compress_k_start = compressed_k.shape[0] * self.kernel_stride
-             past_key_value.update_compress_k(
-                 compressed_k, self.layer_idx
-             )
-             past_key_value.update_no_compress_k(
-                 key_layer[no_compress_k_start:], self.layer_idx, no_compress_k_start)
-             past_key_value.cached_compressed_cu_seqlens.append(compressed_cu_seqlens)
          compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
          topk_idx = compressed_attention(
              query_layer if no_rope_param is None else no_rope_param['query_states_no_rope'],
              compressed_k,
-             ...
              self.kernel_size,
              self.kernel_stride,
              self.block_size,
              self.topk,
              cu_seqlens_q,
              compressed_cu_seqlens,
              max_seqlen_in_batch_q,
              compressed_seqlens.max().item(),
              None,
              init_blocks=self.init_blocks,
              local_blocks=self.local_blocks,
          )
-         ...
          topk_attn_output = infllmv2_attn_varlen_func(
              query_layer,
              key_layer,
@@ -1493,102 +1350,14 @@ class MiniCPMInfLLMv2Attention(MiniCPMAttention):
              dropout_p=0.0,
              deterministic=False,
              softmax_scale=None,
-             causal=...
              return_attn_probs=False,
-             block_window_size=self.window_size // self.block_size,
              topk_idx=topk_idx
          )

          return topk_attn_output

-     def sparse_forward_with_kv_cache(self, past_k=None, past_v=None, new_k=None, new_v=None, new_q=None, batch_size=None, no_rope_param=None, past_key_value=None):
-
-         # stage1_k = new_k.squeeze(0) if no_rope_param is None else no_rope_param['key_states_no_rope']
-         if past_k.shape[1] + new_k.shape[1] == self.dense_len and (past_key_value.compress_k_cache == [] or len(past_key_value.compress_k_cache) < self.layer_idx + 1 or past_key_value.compress_k_cache[self.layer_idx] == []):
-             if no_rope_param is not None:
-                 stage1_k = past_key_value.no_rope_key_cache[self.layer_idx].squeeze(0).contiguous()  # just batch_size ==1
-             else:
-                 stage1_k = torch.cat([past_k, new_k], dim=1).contiguous().squeeze(0).contiguous()  # just batch_size ==1
-             compressed_k, compressed_cu_seqlens = self.compress_k(stage1_k, torch.tensor([0, stage1_k.shape[0]], device=stage1_k.device, dtype=torch.int32))  # just batch_size ==1
-
-             # Compute the start indices of keys (k) that were not compressed, Only batch_size=1 is supported at the moment.
-             no_compress_k_start = compressed_k.shape[0] * self.kernel_stride
-             past_key_value.update_compress_k(
-                 compressed_k, self.layer_idx
-             )
-             past_key_value.update_no_compress_k(
-                 stage1_k[no_compress_k_start:], self.layer_idx, no_compress_k_start)
-             past_key_value.cached_compressed_cu_seqlens.append(compressed_cu_seqlens)
-
-         else:
-             stage1_k = new_k.squeeze(0) if no_rope_param is None else no_rope_param['key_states_no_rope']
-             no_compress_k = past_key_value.update_no_compress_k(
-                 stage1_k, self.layer_idx, kernel_stride=self.kernel_stride, kernel_size=self.kernel_size)
-             if no_compress_k is not None:
-                 compressed_k = no_compress_k.mean(dim=0, keepdim=True)  # [1, n_heads_k, head_dim]
-
-                 compressed_k = past_key_value.update_compress_k(
-                     compressed_k, self.layer_idx)  # [seqlen, nheads_k, head_dim]
-
-                 past_key_value.cached_compressed_cu_seqlens[self.layer_idx][-1] += 1  # !Increment the last entry in sequence lengths by 1; currently supports only batch_size = 1
-                 compressed_cu_seqlens = past_key_value.cached_compressed_cu_seqlens[self.layer_idx]
-             else:
-                 compressed_k = past_key_value.compress_k_cache[self.layer_idx]  # [seqlen, nheads_k, head_dim]
-                 compressed_cu_seqlens = past_key_value.cached_compressed_cu_seqlens[self.layer_idx]
-
-         compressed_v = compressed_k.clone()
-
-         compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
-         torch.cuda.synchronize()
-         # Manually verify that the lengths match
-         assert compressed_k.shape[0] == compressed_seqlens.sum().item(), 'The length of compressed_k does not match the sum of compressed_seqlens'
-         topk_idx = compressed_attention(
-             new_q.squeeze(0).contiguous() if no_rope_param is None else no_rope_param['query_states_no_rope'],
-             compressed_k,
-             compressed_v,
-             self.kernel_size,
-             self.kernel_stride,
-             self.block_size,
-             self.topk,
-             torch.tensor([0, 1], device=compressed_k.device, dtype=torch.int32),
-             compressed_cu_seqlens,
-             1,
-             compressed_seqlens.max().item(),
-             None,
-             init_blocks=self.init_blocks,
-             local_blocks=self.local_blocks,
-             total_seq_lens=past_k.shape[1] + 1,  # !Only batch_size=1 is supported at the moment.
-         )
-
-         repeat_times = 1
-         if repeat_times > 1:
-             new_q = new_q.repeat_interleave(repeat_times, dim=-2)
-         else:
-             new_q = new_q
-
-         cache_batch_idx = torch.arange(batch_size, device=new_q.device, dtype=torch.int32)
-
-         seqlen_k = past_k.shape[1] + new_k.shape[1]  # !Only batch_size=1 is supported at the moment.
-         seqlens_k = torch.full((batch_size,), seqlen_k - 1, dtype=torch.int32, device=new_q.device)
-
-         past_k = torch.cat([past_k, torch.zeros_like(new_k, dtype=new_k.dtype)], dim=1).contiguous()  # Append one zero vector to avoid potential out-of-bounds access
-         past_v = torch.cat([past_v, torch.zeros_like(new_v, dtype=new_v.dtype)], dim=1).contiguous()  # Append one zero vector to avoid potential out-of-bounds access
-         topk_attn_output = infllmv2_attn_with_kvcache(
-             q=new_q,
-             k_cache=past_k,
-             v_cache=past_v,
-             topk_idx=topk_idx,
-             block_window_size=self.window_size // self.block_size,
-             k=new_k,  # [batch_size, 1, nheads_k, d]
-             v=new_v,  # [batch_size, 1, nheads_k, d]
-             cache_seqlens=seqlens_k,  # current_seqlens_k-1
-             rotary_cos=None,  # No rotary embeddings
-             rotary_sin=None,  # No rotary embeddings
-             cache_batch_idx=cache_batch_idx,
-             causal=False,  # Renaming to match function signature
-         )
-         return topk_attn_output
-
      def _flash_attention_forward_dense(
          self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
      ):
@@ -1727,9 +1496,7 @@ class MiniCPMSdpaAttention(MiniCPMAttention):
          key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
          value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
          cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

          query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
@@ -2052,11 +1819,13 @@ class MiniCPMModel(MiniCPMPreTrainedModel):
              raise ValueError(
                  'You must use the new past_key_values format, such as the Cache class, instead of the old tuple format.'
              )
-             past_key_values = DynamicCache.from_legacy_cache(past_key_values)

-         ...
          if self.config.sparse_config is not None and torch.cuda.is_available() and past_key_values_length == 0:
-             past_key_values = ...

          if position_ids is None:
              device = input_ids.device if input_ids is not None else inputs_embeds.device
@@ -2282,12 +2051,17 @@ class MiniCPMForCausalLM(MiniCPMPreTrainedModel):
      ):
          if past_key_values is not None:
              if isinstance(past_key_values, Cache):
                  cache_length = past_key_values.get_seq_length()
-                 ...
                  max_cache_length = None

          # Keep only the unprocessed tokens:
          # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
Updated file (new side of the diff):

  import torch
  import torch.nn.functional as F
  import torch.utils.checkpoint
+ from torch import nn
  from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
  from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache, CacheLayerMixin, DynamicLayer
  from transformers.modeling_attn_mask_utils import (
      AttentionMaskConverter,
      _prepare_4d_attention_mask,
  ...
  )
  from transformers.utils.import_utils import is_torch_fx_available

+
+
+ from .configuration_minicpm import MiniCPMConfig  #! must be changed accordingly

  try:
      from flash_attn import flash_attn_func, flash_attn_varlen_func
  ...
      infllmv2_attn_varlen_func,
      infllmv2_attn_with_kvcache,
      max_pooling_1d,
+     max_pooling_1d_varlen
  )
  except:
      pass
  def compressed_attention(
      q: torch.Tensor,
      k: torch.Tensor,
+     k2: torch.Tensor,
      kernel_size: int,
      kernel_stride: int,
      block_size: int,
      topk: int,
      cu_seqlens_q: torch.Tensor,
      cu_seqlens_k: torch.Tensor,
+     cu_seqlens_k2: torch.Tensor,
      max_seqlen_q: int,
      max_seqlen_k: int,
      sm_scale: float = None,
      init_blocks: int = 1,
      local_blocks: int = 2,
+     cache_lens=None,
  ) -> Tuple[torch.Tensor, torch.Tensor]:
      with torch.no_grad():
          batch_size = cu_seqlens_q.shape[0] - 1
+
+         # Check if it's the prefilling stage
+         is_prefilling = cache_lens is None or (cache_lens == 0).all().item()
+
+         if is_prefilling:  # prefilling stage
+             # Calculate q_idx for each query position in each batch
+             cache_lens = torch.zeros(batch_size, dtype=torch.int32, device=q.device)
+             q_idx = torch.cat([
+                 (torch.arange(cu_seqlens_q[i + 1] - cu_seqlens_q[i], device=q.device) +
+                  max_seqlen_q - (cu_seqlens_q[i + 1] - cu_seqlens_q[i])) // block_size
+                 for i in range(batch_size)
+             ], dim=0)  # shape: [total_q_len]
+         else:  # decoding stage
+             # Each batch has only one query (last position)
+             q_idx = cache_lens // block_size  # shape: [batch_size] = [total_q_len] in decoding
+
+         # compute the attention score
          score = infllmv2_attn_stage1(
              q.contiguous(),
              k.contiguous(),
+             k2.contiguous(),
              cu_seqlens_q=cu_seqlens_q,
              cu_seqlens_k=cu_seqlens_k,
+             cu_seqlens_v=cu_seqlens_k2,
              max_seqlen_q=max_seqlen_q,
              max_seqlen_k=max_seqlen_k,
+             causal=is_prefilling
+         )
+         score = score[:, :q_idx.shape[0], :]  # [num_heads, total_q_len, num_blocks]
+
+         block_score = max_pooling_1d_varlen(
              score.contiguous(),
+             cu_seqlens_q,
+             cu_seqlens_k,
+             cache_lens,
+             max_seqlen_q,
+             max_seqlen_k,
              local_blocks=local_blocks,
              init_blocks=init_blocks,
              block_size=block_size,
+             stride=kernel_stride
+         )  # shape: [num_heads, total_q_len, num_blocks]
+
          # get topk
          topk = min(topk, block_score.shape[-1])
          topk_idx = block_score.topk(topk, dim=-1).indices.sort(-1).values
+         topk_idx[topk_idx > q_idx[None, :, None]] = -1
          topk_idx = topk_idx.to(torch.int32)

          return topk_idx
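The prefill/decode split above is the heart of the new scoring path: during prefill every query token is assigned the index of the key/value block it falls in, while during decoding the single new token per sequence sits at cache_lens // block_size, and any selected block past the query's own block is masked to -1. Below is a minimal, self-contained sketch of just that index computation; the helper name and the example numbers are illustrative and not part of the diff.

    import torch

    def block_indices_sketch(cu_seqlens_q, max_seqlen_q, block_size, cache_lens=None):
        """Toy re-implementation of the q_idx computation shown above (illustration only)."""
        batch_size = cu_seqlens_q.shape[0] - 1
        if cache_lens is None or (cache_lens == 0).all():
            # Prefill: each query token gets the index of the block it ends in,
            # right-aligned to max_seqlen_q so shorter sequences line up with the longest one.
            q_idx = torch.cat([
                (torch.arange(int(cu_seqlens_q[i + 1] - cu_seqlens_q[i]))
                 + max_seqlen_q - int(cu_seqlens_q[i + 1] - cu_seqlens_q[i])) // block_size
                for i in range(batch_size)
            ])
        else:
            # Decode: one new query per sequence, positioned at the end of its cache.
            q_idx = cache_lens // block_size
        return q_idx

    cu = torch.tensor([0, 4, 6], dtype=torch.int32)             # two sequences, lengths 4 and 2
    print(block_indices_sketch(cu, max_seqlen_q=4, block_size=2))   # tensor([0, 0, 1, 1, 1, 1])

Right-aligning shorter sequences to max_seqlen_q keeps the per-token block indices comparable across a padded batch, which is what the later topk masking relies on.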
          return compressed_k, cu_seqlens_compressed


+ class InfLLMv2CacheLayer(DynamicLayer):
+     def __init__(self):
          super().__init__()
+         # Initialize any additional attributes specific to InfLLMv2CacheLayer
+         self.no_rope_keys = torch.tensor([], dtype=torch.float32)
+         self.compress_k_cache = []
+         self.no_compress_k_cache = []
+         self.cached_compressed_cu_seqlens = torch.tensor([], dtype=torch.int32)
+         self.compress_k_cache_varlen = torch.tensor([], dtype=torch.float32)
+         # Add support for compress_k2
+         self.compress_k2_cache = []
+         self.cached_compressed_cu_seqlens2 = torch.tensor([], dtype=torch.int32)
+         self.compress_k2_cache_varlen = torch.tensor([], dtype=torch.float32)
+         self.no_compress_k2_cache = []
+
+     def update_no_rope_key(self, key_states):
+         if self.no_rope_keys.numel() == 0:
+             self.no_rope_keys = key_states
          else:
+             self.no_rope_keys = torch.cat([self.no_rope_keys, key_states], dim=1)
+         return self.no_rope_keys
+
+     def update_compress_k(self, key_states, cu_seqlens=None):
+         if len(self.compress_k_cache) == 0:
+             if cu_seqlens is not None:
+                 self.cached_compressed_cu_seqlens = cu_seqlens.clone()
+                 self.compress_k_cache_varlen = key_states
+                 split_sizes = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
+                 self.compress_k_cache = list(torch.split(key_states, split_sizes))
          else:
+             for index, k in enumerate(key_states):
+                 if k is not None:
+                     self.compress_k_cache[index] = torch.cat([self.compress_k_cache[index], k], dim=0)
+             new_seq_lens = torch.tensor([tensor.shape[0] for tensor in self.compress_k_cache], dtype=torch.int32)
+             new_cumsum = torch.cumsum(new_seq_lens, dim=0, dtype=torch.int32)
+
+             self.compress_k_cache_varlen = torch.cat(self.compress_k_cache, dim=0)
+             self.cached_compressed_cu_seqlens = torch.cat([torch.tensor([0], dtype=torch.int32), new_cumsum]).to(self.compress_k_cache_varlen.device)
+         return self.compress_k_cache_varlen, self.cached_compressed_cu_seqlens
+
+     def update_no_compress_k(self, key_states, kernel_size=32, kernel_stride=16):
+         k_chunk_list = []
+         for index, k in enumerate(key_states):
+             if len(self.no_compress_k_cache) <= index:
+                 self.no_compress_k_cache.append(k)
+             else:
+                 self.no_compress_k_cache[index] = torch.cat([self.no_compress_k_cache[index], k], dim=0)
+             current_len = self.no_compress_k_cache[index].shape[0]
+             if current_len >= kernel_size:
+                 k_chunk_list.append(self.no_compress_k_cache[index][:kernel_size])
+                 self.no_compress_k_cache[index] = self.no_compress_k_cache[index][kernel_stride:]
+             else:
+                 k_chunk_list.append(None)
+         return k_chunk_list
+
+     def update_compress_k2(self, key_states, cu_seqlens=None):
+         if len(self.compress_k2_cache) == 0:
+             if cu_seqlens is not None:
+                 self.cached_compressed_cu_seqlens2 = cu_seqlens.clone()
+                 self.compress_k2_cache_varlen = key_states
+                 split_sizes = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
+                 self.compress_k2_cache = list(torch.split(key_states, split_sizes))
+         else:
+             for index, k in enumerate(key_states):
+                 if k is not None:
+                     self.compress_k2_cache[index] = torch.cat([self.compress_k2_cache[index], k], dim=0)
+             new_seq_lens = torch.tensor([tensor.shape[0] for tensor in self.compress_k2_cache], dtype=torch.int32)
+             new_cumsum = torch.cumsum(new_seq_lens, dim=0, dtype=torch.int32)
+
+             self.compress_k2_cache_varlen = torch.cat(self.compress_k2_cache, dim=0)
+             self.cached_compressed_cu_seqlens2 = torch.cat([torch.tensor([0], dtype=torch.int32), new_cumsum]).to(self.compress_k2_cache_varlen.device)
+         return self.compress_k2_cache_varlen, self.cached_compressed_cu_seqlens2
+
+     def update_no_compress_k2(self, key_states, kernel_size=128, kernel_stride=64):
+         k_chunk_list = []
+         for index, k in enumerate(key_states):
+             if len(self.no_compress_k2_cache) <= index:
+                 self.no_compress_k2_cache.append(k)
+             else:
+                 self.no_compress_k2_cache[index] = torch.cat([self.no_compress_k2_cache[index], k], dim=0)
+             current_len = self.no_compress_k2_cache[index].shape[0]
+             if current_len >= kernel_size:
+                 k_chunk_list.append(self.no_compress_k2_cache[index][:kernel_size])
+                 self.no_compress_k2_cache[index] = self.no_compress_k2_cache[index][kernel_stride:]
+             else:
+                 k_chunk_list.append(None)
+         return k_chunk_list

+ class InfLLMv2Cache(DynamicCache):
+     def __init__(self, config, num_hidden_layers: Optional[int] = None) -> None:
+         super().__init__(config=config)
+         self.layers = [InfLLMv2CacheLayer() for _ in range(num_hidden_layers)] if num_hidden_layers else []
+         self._seen_tokens = 0

+     def update(self, key_states, value_states, layer_idx, cache_kwargs=None):
          if layer_idx == 0:
              self._seen_tokens += key_states.shape[-2]
+         return self.layers[layer_idx].update(key_states, value_states, cache_kwargs)

+     def update_no_rope_key(self, key_states, layer_idx, cache_kwargs=None):
+         return self.layers[layer_idx].update_no_rope_key(key_states)

+     def update_compress_k(self, key_states, layer_idx, cu_seqlens=None, cache_kwargs=None):
+         return self.layers[layer_idx].update_compress_k(key_states, cu_seqlens)

+     def update_no_compress_k(self, key_states, layer_idx, kernel_size=32, kernel_stride=16, cache_kwargs=None):
+         return self.layers[layer_idx].update_no_compress_k(key_states, kernel_size, kernel_stride)

+     def update_compress_k2(self, key_states, layer_idx, cu_seqlens=None, cache_kwargs=None):
+         return self.layers[layer_idx].update_compress_k2(key_states, cu_seqlens)

+     def update_no_compress_k2(self, key_states, layer_idx, kernel_size=128, kernel_stride=64, cache_kwargs=None):
+         return self.layers[layer_idx].update_no_compress_k2(key_states, kernel_size, kernel_stride)

+     def crop(self, max_length):
+         for layer in self.layers:
+             layer.crop(max_length)

+     def batch_repeat_interleave(self, repeats):
+         for layer in self.layers:
+             layer.batch_repeat_interleave(repeats)

+     def batch_select_indices(self, indices):
+         for layer in self.layers:
+             layer.batch_select_indices(indices)
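A rough usage sketch for the per-layer cache defined above (illustration only: it assumes InfLLMv2CacheLayer from this module is already in scope, and the tensor shapes are made-up placeholders). Compressed keys are kept both as a per-sequence list and as a flat varlen tensor with cumulative sequence lengths, which is the layout the varlen attention kernels expect.

    import torch
    # Assumes InfLLMv2CacheLayer from this file (modeling_minicpm.py) is importable.

    layer = InfLLMv2CacheLayer()

    # Prefill: store the compressed keys of two sequences (4 and 2 compressed vectors) in varlen form.
    compressed_k = torch.randn(6, 2, 64)                      # [total_compressed_len, kv_heads, head_dim]
    cu_seqlens = torch.tensor([0, 4, 6], dtype=torch.int32)
    k_varlen, k_cu = layer.update_compress_k(compressed_k, cu_seqlens)

    # Decode: append a new compressed vector for sequence 0 only (None = nothing new for sequence 1).
    new_chunk = torch.randn(1, 2, 64)
    k_varlen, k_cu = layer.update_compress_k([new_chunk, None])
    print(k_cu)   # tensor([0, 5, 7], dtype=torch.int32)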
  # This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph.
  ...
  )
  ...
  # @torch.jit.script # type: ignore
      return down_proj

+ def _unpad_one_tensor(hidden_states, attention_mask):
+     # Unpad the hidden states using the indices
+     indices, cu_seqlens, max_seqlen_in_batch = _get_unpad_data(attention_mask)
+     batch_size, seq_len = hidden_states.shape[:2]
+
+     # Get the remaining dimensions
+     remaining_dims = hidden_states.shape[2:]
+
+     # Reshape to (batch_size * seq_len, *remaining_dims)
+     reshaped_states = hidden_states.reshape(batch_size * seq_len, *remaining_dims)
+
+     # Apply unpadding using indices
+     unpadded_states = index_first_axis(reshaped_states, indices)
+
+     return unpadded_states, indices, cu_seqlens, max_seqlen_in_batch
  def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
      """
      This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
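_unpad_one_tensor leans on the existing _get_unpad_data helper, and the bookkeeping it returns is easy to reproduce by hand. A small standalone illustration (the mask and the numbers are made up, not taken from the diff):

    import torch
    import torch.nn.functional as F

    attention_mask = torch.tensor([[1, 1, 1, 0],
                                   [1, 1, 0, 0]])             # batch of 2, padded to length 4
    seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)   # tensor([3, 2])
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))

    print(indices)            # tensor([0, 1, 2, 4, 5]) -> positions of real tokens in the flattened batch
    print(cu_seqlens)         # tensor([0, 3, 5], dtype=torch.int32)
    print(int(seqlens.max())) # 3 -> max_seqlen_in_batch

The unpadded tensor gathered with these indices is what gets handed to the varlen attention kernels, and pad_input later uses the same indices to scatter the output back into the padded batch layout.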
      key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
      value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

+     kv_seq_len = position_ids.max().item() + 1
      cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)

      query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
  ...
      key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
      value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

+     kv_seq_len = position_ids.max().item() + 1
      cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
      query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
      self.dense_len = self.config.sparse_config.get('dense_len', 8192)

      self.local_blocks = self.window_size // self.block_size  # local_blocks
+     self.topk = self.config.sparse_config.get('topk', 64) + (self.window_size // self.block_size)
      self.use_nope = self.config.sparse_config.get('use_nope', False)
+
      self.compress_k = CompressK(self.num_key_value_heads, self.head_dim, kernel_size=self.kernel_size, kernel_stride=self.kernel_stride)
+     self.compress_k2 = CompressK(self.num_key_value_heads, self.head_dim, kernel_size=self.kernel_size * 4, kernel_stride=self.kernel_stride * 4)

  def forward(
      self,
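Worked numbers for the widened top-k above (the concrete values are illustrative, not taken from a released config): the blocks inside the sliding window are always attended, so adding window_size // block_size to the configured top-k presumably keeps the number of freely selected blocks at the configured value.

    topk_cfg = 64                              # sparse_config['topk'] (illustrative)
    window_size, block_size = 2048, 64         # illustrative values
    local_blocks = window_size // block_size   # 32 blocks covered by the local window
    effective_topk = topk_cfg + local_blocks   # 96 candidate blocks passed to the kernel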
      output_attentions = False

      bsz, q_len, _ = hidden_states.size()
+

      query_states = self.q_proj(hidden_states)
      key_states = self.k_proj(hidden_states)
      key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
      value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

+     kv_seq_len = position_ids.max().item() + 1
      cos, sin = self.rotary_emb(value_states.to(torch.float32), seq_len=kv_seq_len)
      query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
      key_states = key_states.transpose(1, 2)
      value_states = value_states.transpose(1, 2)
      if self.use_nope:
+         key_states_no_rope = past_key_value.update_no_rope_key(key_states_no_rope, self.layer_idx)
          no_rope_param = {
              'key_states_no_rope': key_states_no_rope,
              'query_states_no_rope': query_states_no_rope,
          }
+
      else:
          no_rope_param = None
      if kv_seq_len < self.dense_len:
          attn_output = self._flash_attention_forward_dense(
              query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate)
+     else:
+         attn_output = self._sparse_attention_forward(
              query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate,
              no_rope_param=no_rope_param,  # if past_key_value is not None else None,
              past_key_value=past_key_value)
      attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
      attn_output = self.o_proj(attn_output)
  ...

      return attn_output, attn_weights, past_key_value
+
def _sparse_attention_forward(
|
| 1134 |
+
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, no_rope_param=None, past_key_value=None
|
| 1135 |
+
):
|
| 1136 |
+
"""
|
| 1137 |
+
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
|
| 1138 |
+
first unpad the input, then computes the attention scores and pad the final attention scores.
|
| 1139 |
+
|
| 1140 |
+
Args:
|
| 1141 |
+
query_states (`torch.Tensor`):
|
| 1142 |
+
Input query states to be passed to Flash Attention API
|
| 1143 |
+
key_states (`torch.Tensor`):
|
| 1144 |
+
Input key states to be passed to Flash Attention API
|
| 1145 |
+
value_states (`torch.Tensor`):
|
| 1146 |
+
Input value states to be passed to Flash Attention API
|
| 1147 |
+
attention_mask (`torch.Tensor`):
|
| 1148 |
+
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
| 1149 |
+
position of padding tokens and 1 for the position of non-padding tokens.
|
| 1150 |
+
dropout (`int`, *optional*):
|
| 1151 |
+
Attention dropout
|
| 1152 |
+
softmax_scale (`float`, *optional*):
|
| 1153 |
+
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
| 1154 |
+
"""
|
| 1155 |
+
if not self._flash_attn_uses_top_left_mask:
|
| 1156 |
+
causal = self.is_causal
|
| 1157 |
+
else:
|
| 1158 |
+
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in MiniCPMFlashAttention2 __init__.
|
| 1159 |
+
causal = self.is_causal and query_length != 1
|
| 1160 |
+
# Contains at least one padding token in the sequence
|
| 1161 |
+
if attention_mask is not None:
|
| 1162 |
+
batch_size = query_states.shape[0]
|
| 1163 |
+
# assert batch_size == 1, 'Only batch_size=1 is supported at the moment.'
|
| 1164 |
+
if past_key_value!=None:
|
| 1165 |
+
compressed_k, compressed_cu_seqlens, compressed_k2, compressed_cu_seqlens2 = self.get_compress_k(
|
| 1166 |
+
key_states=key_states if self.use_nope ==False else no_rope_param['key_states_no_rope'], # This can be optimized a bit;
|
| 1167 |
+
attention_mask=attention_mask,
|
| 1168 |
+
past_key_value=past_key_value,
|
| 1169 |
+
|
| 1170 |
+
)
|
| 1171 |
+
|
| 1172 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
| 1173 |
+
query_states, key_states, value_states, attention_mask, query_length
|
| 1174 |
+
)
|
| 1175 |
+
|
| 1176 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
| 1177 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
| 1178 |
+
if no_rope_param != None:
|
| 1179 |
+
if max_seqlen_in_batch_q == 1:
|
| 1180 |
+
no_rope_param['query_states_no_rope'] = no_rope_param['query_states_no_rope'].squeeze(1)
|
| 1181 |
+
else:
|
| 1182 |
+
no_rope_param['query_states_no_rope'],_, _, _ = _unpad_one_tensor(no_rope_param['query_states_no_rope'],attention_mask=attention_mask)
|
| 1183 |
+
if past_key_value==None:
|
| 1184 |
+
# compress_k use varlen form
|
| 1185 |
+
compressed_k, compressed_cu_seqlens = self.compress_k(key_states,cu_seqlens_k)
|
| 1186 |
+
compressed_k2, compressed_cu_seqlens2 = self.compress_k2(key_states,cu_seqlens_k)
|
| 1187 |
+
else:
|
| 1188 |
+
# compressed_k and compressed_k2 already retrieved from get_compress_k above
|
| 1189 |
+
pass
|
| 1190 |
+
|
| 1191 |
+
|
| 1192 |
+
attn_output_unpad = self.sparse_forward(
|
| 1193 |
+
query_states,
|
| 1194 |
+
key_states,
|
| 1195 |
+
value_states,
|
| 1196 |
+
cu_seqlens_q,
|
| 1197 |
+
cu_seqlens_k,
|
| 1198 |
+
max_seqlen_in_batch_q,
|
| 1199 |
+
max_seqlen_in_batch_k,
|
| 1200 |
+
no_rope_param=no_rope_param,
|
| 1201 |
+
compressed_k=compressed_k, compressed_cu_seqlens=compressed_cu_seqlens,
|
| 1202 |
+
compressed_k2=compressed_k2, compressed_cu_seqlens2=compressed_cu_seqlens2
|
| 1203 |
+
)
|
| 1204 |
|
| 1205 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
| 1206 |
+
|
| 1207 |
+
else:
|
| 1208 |
+
raise ValueError('Need attention mask')
|
| 1209 |
|
| 1210 |
+
return attn_output
|
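For reference, here is a minimal sketch (editor's illustration, not part of the diff) of the unpad/pad round-trip that `_upad_input` and `pad_input` perform around the sparse kernels, assuming a `(batch_size, seq_len)` attention mask with 1 for real tokens and 0 for padding. The helper names below are hypothetical.

import torch
import torch.nn.functional as F

def unpad_sketch(hidden, attention_mask):
    # hidden: [batch, seq, heads, dim]; keep only non-padding positions (varlen layout).
    seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)                      # tokens per sequence
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()  # flat indices of real tokens
    cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))  # [0, len0, len0+len1, ...]
    flat = hidden.reshape(-1, *hidden.shape[2:])[indices]                        # [total_tokens, heads, dim]
    return flat, indices, cu_seqlens, int(seqlens.max())

def pad_sketch(flat, indices, batch_size, seqlen):
    # Scatter the varlen output back into a dense [batch, seq, ...] tensor (zeros at padding positions).
    out = flat.new_zeros(batch_size * seqlen, *flat.shape[1:])
    out[indices] = flat
    return out.view(batch_size, seqlen, *flat.shape[1:])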
| 1211 |
+
def get_compress_k(self, key_states, attention_mask, past_key_value):
|
| 1212 |
"""
|
| 1213 |
+
Get compressed key states and corresponding cumulative sequence lengths.
|
| 1214 |
+
|
| 1215 |
Args:
|
| 1216 |
+
key_states: Key states tensor
|
| 1217 |
+
attention_mask: Padding mask of shape (batch_size, seq_len), where 1 marks real tokens and 0 marks padding
|
| 1218 |
+
past_key_value: Past key-value cache (InfLLMv2Cache) that stores the compressed-key and leftover-key buffers
|
| 1220 |
+
|
| 1221 |
+
Returns:
|
| 1222 |
+
Tuple of (compressed_k, compressed_cu_seqlens, compressed_k2, compressed_cu_seqlens2)
|
| 1223 |
"""
|
| 1224 |
+
|
| 1225 |
+
# Check if this is prefilling or initial compression condition
|
| 1226 |
|
| 1227 |
+
is_prefilling = (
|
| 1228 |
+
key_states.shape[1] >= self.dense_len and
|
| 1229 |
+
(
|
| 1230 |
+
not past_key_value.layers[self.layer_idx].compress_k_cache
|
| 1231 |
+
)
|
| 1232 |
+
)
|
| 1233 |
+
|
| 1234 |
+
if is_prefilling:
|
| 1235 |
+
unpadded_key_states, indices, cu_seqlens, max_seqlen_in_batch = _unpad_one_tensor(key_states,attention_mask=attention_mask)
|
| 1236 |
+
# Compress the keys
|
| 1237 |
+
compressed_k, compressed_cu_seqlens = self.compress_k(unpadded_key_states, cu_seqlens)
|
| 1238 |
+
compressed_k2, compressed_cu_seqlens2 = self.compress_k2(unpadded_key_states, cu_seqlens)
|
| 1239 |
+
|
| 1240 |
+
past_key_value.update_compress_k(
|
| 1241 |
+
compressed_k, self.layer_idx, compressed_cu_seqlens)
|
| 1242 |
+
past_key_value.update_compress_k2(
|
| 1243 |
+
compressed_k2, self.layer_idx, compressed_cu_seqlens2)
|
| 1244 |
+
|
| 1245 |
+
no_compress_k_list = []
|
| 1246 |
+
# Compute and update no_compress_k
|
| 1247 |
+
for i in range(len(compressed_cu_seqlens)-1):
|
| 1248 |
+
no_compress_k_start = (compressed_cu_seqlens[i+1]- compressed_cu_seqlens[i]) * self.kernel_stride
|
| 1249 |
+
|
| 1250 |
+
no_compress_k_list.append(unpadded_key_states[cu_seqlens[i]+no_compress_k_start:cu_seqlens[i+1]].clone())
|
| 1251 |
|
| 1252 |
+
past_key_value.update_no_compress_k(
|
| 1253 |
+
no_compress_k_list, self.layer_idx,kernel_stride=self.kernel_stride,
|
| 1254 |
+
kernel_size=self.kernel_size)
|
| 1255 |
+
|
| 1256 |
+
# Also update no_compress_k2
|
| 1257 |
+
no_compress_k2_list = []
|
| 1258 |
+
for i in range(len(compressed_cu_seqlens2)-1):
|
| 1259 |
+
no_compress_k2_start = (compressed_cu_seqlens2[i+1]- compressed_cu_seqlens2[i]) * self.kernel_stride * 4
|
| 1260 |
+
|
| 1261 |
+
no_compress_k2_list.append(unpadded_key_states[cu_seqlens[i]+no_compress_k2_start:cu_seqlens[i+1]].clone())
|
| 1262 |
+
|
| 1263 |
+
past_key_value.update_no_compress_k2(
|
| 1264 |
+
no_compress_k2_list, self.layer_idx,kernel_stride=self.kernel_stride*4,
|
| 1265 |
+
kernel_size=self.kernel_size*4)
|
| 1266 |
+
|
| 1267 |
else:
|
| 1268 |
+
# Decode case: incremental update
|
| 1269 |
+
batch_size = key_states.shape[0] # key_states.shape = [batch_size, seq, k_head_num, head_dim]
|
| 1270 |
+
key_states_split = list(torch.split(
|
| 1271 |
+
key_states[:,-1:].squeeze(1), #[batch_size, seq, k_head_num, head_dim]->[batch_size, 1, k_head_num, head_dim]-> [batch_size, k_head_num, head_dim]
|
| 1272 |
+
[1] * batch_size,dim=0,
|
| 1273 |
+
))
|
| 1274 |
+
# Try to update no_compress_k buffer
|
| 1275 |
+
no_compress_k_list = past_key_value.update_no_compress_k(
|
| 1276 |
+
key_states_split, self.layer_idx,
|
| 1277 |
+
kernel_stride=self.kernel_stride,
|
| 1278 |
+
kernel_size=self.kernel_size)
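# Editor's note (not part of the diff): update_no_compress_k presumably buffers the newly decoded key for
# each sequence and returns the buffered window only once kernel_size tokens are available for the next
# stride; entries stay None until then, which is why the loop below skips them.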
|
| 1279 |
+
new_compressed_k_list = []
|
| 1280 |
+
for no_compress_k in no_compress_k_list:
|
| 1281 |
+
|
| 1282 |
+
if no_compress_k is not None:
|
| 1283 |
+
# We have enough tokens to compress
|
| 1284 |
+
new_compressed_k = no_compress_k.mean(dim=0, keepdim=True) # [1, n_heads_k, head_dim]
|
| 1285 |
+
|
| 1286 |
+
new_compressed_k_list.append(new_compressed_k)
|
| 1287 |
+
else:
|
| 1288 |
+
new_compressed_k_list.append(None)
|
| 1289 |
+
compressed_k, compressed_cu_seqlens = past_key_value.update_compress_k(new_compressed_k_list, self.layer_idx,)
|
| 1290 |
+
|
| 1291 |
+
# For compress_k2, update no_compress_k2 buffer and compress when ready
|
| 1292 |
+
no_compress_k2_list = past_key_value.update_no_compress_k2(
|
| 1293 |
+
key_states_split, self.layer_idx,
|
| 1294 |
+
kernel_stride=self.kernel_stride*4,
|
| 1295 |
+
kernel_size=self.kernel_size*4)
|
| 1296 |
+
new_compressed_k2_list = []
|
| 1297 |
+
for no_compress_k2 in no_compress_k2_list:
|
| 1298 |
+
if no_compress_k2 is not None:
|
| 1299 |
+
# We have enough tokens to compress for k2
|
| 1300 |
+
new_compressed_k2 = no_compress_k2.mean(dim=0, keepdim=True) # [1, n_heads_k, head_dim]
|
| 1301 |
+
new_compressed_k2_list.append(new_compressed_k2)
|
| 1302 |
+
else:
|
| 1303 |
+
new_compressed_k2_list.append(None)
|
| 1304 |
+
compressed_k2, compressed_cu_seqlens2 = past_key_value.update_compress_k2(new_compressed_k2_list, self.layer_idx,)
|
| 1305 |
+
|
| 1306 |
+
return compressed_k, compressed_cu_seqlens, compressed_k2, compressed_cu_seqlens2
|
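As a side note, the following sketch (editor's illustration, not part of the diff) shows the kind of windowed mean-pooling that `compress_k` is assumed to perform over the varlen key layout, consistent with the decode branch above, which averages the buffered tokens. The kernel_size/kernel_stride defaults are placeholders and the real CompressK module may differ.

import torch

def mean_pool_compress_sketch(key_states, cu_seqlens, kernel_size=32, kernel_stride=16):
    # key_states: [total_tokens, n_kv_heads, head_dim] in varlen layout.
    # Emits one compressed key per window of kernel_size tokens taken at stride kernel_stride,
    # plus cumulative lengths over the compressed tokens.
    compressed, lens = [], [0]
    for i in range(len(cu_seqlens) - 1):
        seq = key_states[cu_seqlens[i]:cu_seqlens[i + 1]]
        n_windows = max((seq.shape[0] - kernel_size) // kernel_stride + 1, 0)
        for w in range(n_windows):
            start = w * kernel_stride
            compressed.append(seq[start:start + kernel_size].mean(dim=0))
        lens.append(lens[-1] + n_windows)
        # Tokens from n_windows * kernel_stride onward form the "no_compress_k" tail kept in the
        # cache until enough new tokens arrive during decoding (mirrors no_compress_k_start above).
    if compressed:
        compressed_k = torch.stack(compressed)
    else:
        compressed_k = key_states.new_zeros((0,) + tuple(key_states.shape[1:]))
    compressed_cu_seqlens = torch.tensor(lens, dtype=torch.int32, device=key_states.device)
    return compressed_k, compressed_cu_seqlens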
| 1307 |
def sparse_forward(self,
|
| 1308 |
query_layer,
|
| 1309 |
key_layer,
|
|
|
|
| 1313 |
max_seqlen_in_batch_q,
|
| 1314 |
max_seqlen_in_batch_k,
|
| 1315 |
no_rope_param=None,
|
| 1316 |
+
compressed_k=None, compressed_cu_seqlens=None,
|
| 1317 |
+
compressed_k2=None, compressed_cu_seqlens2=None):
|
| 1318 |
compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
|
| 1319 |
+
cache_lens = None
|
| 1320 |
+
if max_seqlen_in_batch_q == 1 and max_seqlen_in_batch_k > 1:  # decoding
|
| 1321 |
+
seq_lens_k = cu_seqlens_k[1:] - cu_seqlens_k[:-1]
|
| 1322 |
+
cache_lens = seq_lens_k-1
|
| 1323 |
+
|
| 1324 |
topk_idx = compressed_attention(
|
| 1325 |
query_layer if no_rope_param is None else no_rope_param['query_states_no_rope'],
|
| 1326 |
compressed_k,
|
| 1327 |
+
compressed_k2,
|
| 1328 |
self.kernel_size,
|
| 1329 |
self.kernel_stride,
|
| 1330 |
self.block_size,
|
| 1331 |
self.topk,
|
| 1332 |
cu_seqlens_q,
|
| 1333 |
compressed_cu_seqlens,
|
| 1334 |
+
compressed_cu_seqlens2,
|
| 1335 |
max_seqlen_in_batch_q,
|
| 1336 |
compressed_seqlens.max().item(),
|
| 1337 |
None,
|
| 1338 |
init_blocks=self.init_blocks,
|
| 1339 |
local_blocks=self.local_blocks,
|
| 1340 |
+
cache_lens=cache_lens
|
| 1341 |
)
|
|
|
|
| 1342 |
topk_attn_output = infllmv2_attn_varlen_func(
|
| 1343 |
query_layer,
|
| 1344 |
key_layer,
|
|
|
|
| 1350 |
dropout_p=0.0,
|
| 1351 |
deterministic=False,
|
| 1352 |
softmax_scale=None,
|
| 1353 |
+
causal=max_seqlen_in_batch_q != 1,
|
| 1354 |
return_attn_probs=False,
|
| 1355 |
+
# block_window_size=self.window_size // self.block_size,
|
| 1356 |
topk_idx=topk_idx
|
| 1357 |
)
|
| 1358 |
|
| 1359 |
return topk_attn_output
|
| 1360 |
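A small worked example (editor's illustration, not part of the diff) of how the per-sequence lengths used in sparse_forward are derived from the cumulative-sequence-length tensors during decoding, when each sequence contributes exactly one new query token. The numbers are made up.

import torch

cu_seqlens_k = torch.tensor([0, 130, 250], dtype=torch.int32)           # hypothetical batch of two sequences
compressed_cu_seqlens = torch.tensor([0, 7, 14], dtype=torch.int32)

seq_lens_k = cu_seqlens_k[1:] - cu_seqlens_k[:-1]                        # tensor([130, 120]) keys per sequence
cache_lens = seq_lens_k - 1                                              # keys already cached, excluding the token being decoded
compressed_seqlens = compressed_cu_seqlens[1:] - compressed_cu_seqlens[:-1]
max_seqlen_compressed = compressed_seqlens.max().item()                  # passed as the compressed max_seqlen_k to compressed_attention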
| 1361 |
def _flash_attention_forward_dense(
|
| 1362 |
self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
|
| 1363 |
):
|
| 1496 |
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 1497 |
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| 1498 |
|
| 1499 |
+
kv_seq_len = position_ids.max().item() + 1
|
| 1500 |
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
| 1501 |
|
| 1502 |
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
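A quick illustration (editor's note, not part of the diff) of why the dense path above takes kv_seq_len from position_ids: during cached decoding typically only the new token's position is passed, so `position_ids.max() + 1` gives the total length the rotary embedding cache must cover. The numbers are illustrative.

import torch

past_len = 130
position_ids = torch.tensor([[past_len]])        # only the new token's position during decoding
kv_seq_len = position_ids.max().item() + 1        # 131 = cached keys + the token being decoded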
| 1819 |
raise ValueError(
|
| 1820 |
'You must use the new past_key_values format, such as the Cache class, instead of the old tuple format.'
|
| 1821 |
)
|
| 1822 |
|
| 1823 |
+
# Calculate the usable length of past key values
|
| 1824 |
+
past_key_values_length = past_key_values.get_seq_length() if isinstance(past_key_values, InfLLMv2Cache) else 0
|
| 1825 |
+
|
| 1826 |
+
# Initialize InfLLMv2Cache if needed
|
| 1827 |
if self.config.sparse_config is not None and torch.cuda.is_available() and past_key_values_length == 0:
|
| 1828 |
+
past_key_values = InfLLMv2Cache(config=self.config, num_hidden_layers=self.config.num_hidden_layers)
|
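# Editor's note (not part of the diff): a fresh InfLLMv2Cache is created only on the prefill step
# (nothing cached yet), and only when a sparse config is present and CUDA is available; on later
# decode steps the existing cache, which already holds the compressed-key buffers, is reused.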
| 1829 |
|
| 1830 |
if position_ids is None:
|
| 1831 |
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| 2051 |
):
|
| 2052 |
if past_key_values is not None:
|
| 2053 |
if isinstance(past_key_values, Cache):
|
| 2054 |
+
# Use the new Cache class methods
|
| 2055 |
cache_length = past_key_values.get_seq_length()
|
| 2056 |
+
|
| 2057 |
+
if self.config.sparse_config is not None and torch.cuda.is_available() and cache_length == 0:
|
| 2058 |
+
past_key_values = InfLLMv2Cache(config=self.config, num_hidden_layers=self.config.num_hidden_layers)
|
| 2059 |
+
past_length = cache_length
|
| 2060 |
max_cache_length = None
|
| 2061 |
+
else:
|
| 2062 |
+
raise ValueError(
|
| 2063 |
+
'You must use the new past_key_values format, such as the Cache class, instead of the old tuple format.'
|
| 2064 |
+
)
|
| 2065 |
|
| 2066 |
# Keep only the unprocessed tokens:
|
| 2067 |
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
|