import math
from typing import Optional, List, Union, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from models.helpers import DropPath, drop_path
from utils.model_args import ModelArgs
from transformers import AutoImageProcessor, AutoModel

__all__ = ['FFN', 'AdaLNSelfAttn', 'AdaLNBeforeHead']


# Optional fused kernels: fall back to None / pure-PyTorch implementations when unavailable.
dropout_add_layer_norm = fused_mlp_func = memory_efficient_attention = flash_attn_func = None
try:
    from flash_attn.ops.layer_norm import dropout_add_layer_norm
    from flash_attn.ops.fused_dense import fused_mlp_func
except ImportError: pass

try: from xformers.ops import memory_efficient_attention
except ImportError: pass
try: from flash_attn import flash_attn_func
except ImportError: pass
try: from torch.nn.functional import scaled_dot_product_attention as slow_attn    # q, k, v: BHLc
except ImportError:
    def slow_attn(query, key, value, scale: float, attn_mask=None, dropout_p=0.0):
        attn = query.mul(scale) @ key.transpose(-2, -1)    # BHLc @ BHcL => BHLL
        if attn_mask is not None: attn.add_(attn_mask)
        return (F.dropout(attn.softmax(dim=-1), p=dropout_p, inplace=True) if dropout_p > 0 else attn.softmax(dim=-1)) @ value
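

# Hedged sanity check (illustrative only, never called by the model): a minimal sketch showing that
# `slow_attn` reproduces plain scaled softmax attention. The shapes and tolerance are assumptions
# made for illustration, not values used elsewhere in this file.
def _check_slow_attn_reference():
    q, k, v = (torch.randn(2, 4, 8, 16) for _ in range(3))   # (B, heads, L, head_dim)
    scale = 16 ** -0.5
    ref = ((q * scale) @ k.transpose(-2, -1)).softmax(dim=-1) @ v
    out = slow_attn(q, k, v, scale=scale)
    assert torch.allclose(ref, out, atol=1e-5)
    return out.shape
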
class ConditionEmbedder(nn.Module):
    """
    Embeds conditions into vector representations. Also handles condition dropout for classifier-free guidance.
    """
    def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, vocab_size=16384):
        super().__init__()
        self.cap_proj = MLP(in_features=hidden_size, hidden_features=hidden_size, out_features=hidden_size)
        # All-zero "null" embedding substituted for dropped conditions.
        self.register_buffer("uncond_embedding", torch.zeros(token_num, hidden_size) / hidden_size ** 0.5)
        self.uncond_prob = uncond_prob

    def token_drop(self, caption, force_drop_ids=None, drop_ids=None):
        """
        Drops conditions to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            if drop_ids is None:
                drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
        else:
            drop_ids = force_drop_ids == 1

        # Tile or truncate the null embedding so it matches the sequence length of `caption`.
        if self.uncond_embedding.shape[0] < caption.shape[1]:
            repeat_factor = int(caption.shape[1] / self.uncond_embedding.shape[0]) + 1
            extended = self.uncond_embedding.repeat(repeat_factor, 1)[:caption.shape[1]]
        else:
            extended = self.uncond_embedding[:caption.shape[1]]

        caption = torch.where(drop_ids[:, None, None], extended, caption)
        return caption

    def forward(self, caption, train, force_drop_ids=None, drop_ids=None):
        use_dropout = self.uncond_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            caption = self.token_drop(caption, force_drop_ids, drop_ids)
        embeddings = self.cap_proj(caption)
        return embeddings
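

# Hedged usage sketch (illustrative only, never called by the model): during training,
# ConditionEmbedder randomly replaces whole condition sequences with the null embedding, which is
# what later enables classifier-free guidance at sampling time. Shapes are assumptions.
def _example_condition_dropout():
    embedder = ConditionEmbedder(in_channels=768, hidden_size=768, uncond_prob=0.1, token_num=256)
    cond = torch.randn(4, 256, 768)                      # (B, token_num, hidden_size)
    dropped = embedder(cond, train=True)                 # ~10% of the batch swapped for the null embedding
    forced = embedder(cond, train=False,
                      force_drop_ids=torch.ones(4))      # every row forced onto the unconditional branch
    return dropped.shape, forced.shape
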
class MLP(nn.Module):
    def __init__(self, in_features, hidden_features, out_features):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
        self.act = nn.GELU(approximate='tanh')
        self.fc2 = nn.Linear(hidden_features, out_features, bias=False)

        # Zero-initialize both projections so the MLP starts as a no-op (all-zero output).
        nn.init.zeros_(self.fc1.weight)
        nn.init.zeros_(self.fc2.weight)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.fc2(x)
        return x
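

# Hedged sketch (illustrative only, never called by the model): with both fc1 and fc2 zero-initialized,
# a fresh MLP returns zeros, and the gradients of both layers are also zero at this starting point
# (fc1's gradient is scaled by fc2's zero weight; fc2's gradient is scaled by GELU(0) = 0).
# Zero-initializing only the output projection is the more common way to make an adapter start silent.
def _example_mlp_zero_init():
    mlp = MLP(8, 16, 8)
    x = torch.randn(2, 8, requires_grad=True)
    y = mlp(x)
    assert torch.all(y == 0)
    y.sum().backward()
    assert torch.all(mlp.fc1.weight.grad == 0) and torch.all(mlp.fc2.weight.grad == 0)
    return y.shape
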
class Dinov2_Adapter(nn.Module):
    def __init__(self, input_dim=1, output_dim=768, attention=False, pool=False, nheads=8, dropout=0.1, adapter_size='small', condition_type='seg'):
        super(Dinov2_Adapter, self).__init__()
        from transformers import logging
        logging.set_verbosity_error()

        # Frozen DINOv2 backbone loaded from a local checkpoint matching `adapter_size`
        # ('small' -> ./dinov2_small, 'base' -> ./dinov2_base).
        self.model = AutoModel.from_pretrained(f'./dinov2_{adapter_size}', local_files_only=True, use_safetensors=False)
        self.condition_type = condition_type

    def to_patch14(self, input):
        # The VQ tokenizer works on a 16-pixel grid while DINOv2 uses 14-pixel patches;
        # resize so both produce the same number of tokens per side (H // 16).
        H, W = input.shape[2:]
        new_H = (H // 16) * 14
        new_W = (W // 16) * 14
        if self.condition_type in ['canny', 'seg']:
            output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='nearest')
        else:
            output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='bicubic', align_corners=True)
        return output

    def forward(self, x):
        x = self.to_patch14(x)
        x = self.model(x)
        return x.last_hidden_state[:, 1:]  # drop the [CLS] token, keep patch tokens
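

# Hedged usage sketch (illustrative only, never called by the model): the adapter maps a conditioning
# image to a sequence of DINOv2 patch tokens whose length equals the VQ token grid (H//16 * W//16).
# It assumes the local DINOv2 checkpoint referenced above is available; the shapes are illustrative.
def _example_adapter_tokens():
    adapter = Dinov2_Adapter(adapter_size='small', condition_type='seg').eval()
    seg = torch.randn(1, 3, 256, 256)                 # conditioning image, (B, 3, H, W)
    with torch.no_grad():
        tokens = adapter(seg)                         # (1, 16*16, 384) for the small backbone
    return tokens.shape
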
class CrossAttentionInjection(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.query_proj = nn.Linear(embed_dim, embed_dim)
        self.key_proj = nn.Linear(embed_dim, embed_dim)
        self.value_proj = nn.Linear(embed_dim, embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self.num_heads = num_heads
        self.scale = (embed_dim // num_heads) ** -0.5

    def forward(self, x, cond_feat):
        """
        x: [B, L, C], the main feature sequence
        cond_feat: [B, L_cond, C], condition token sequence derived from the segmentation map
        """
        B, L, C = x.shape
        H = self.num_heads
        Q = self.query_proj(x).reshape(B, L, H, C // H).transpose(1, 2)
        K = self.key_proj(cond_feat).reshape(B, -1, H, C // H).transpose(1, 2)
        V = self.value_proj(cond_feat).reshape(B, -1, H, C // H).transpose(1, 2)

        attn = (Q @ K.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        out = (attn @ V).transpose(1, 2).reshape(B, L, C)
        return self.out_proj(out)
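

# Hedged usage sketch (illustrative only, never called by the model): queries come from the main
# token sequence, keys/values from the condition tokens, so the output has the same length as `x`
# and can be added back to it as a residual. Shapes are assumptions for illustration.
def _example_cross_attention_injection():
    inject = CrossAttentionInjection(embed_dim=768, num_heads=12)
    x = torch.randn(2, 256, 768)          # main feature sequence  (B, L, C)
    cond = torch.randn(2, 196, 768)       # condition tokens       (B, L_cond, C)
    out = inject(x, cond)                 # (2, 256, 768): one residual per main token
    return (x + out).shape
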
class FFN(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, drop=0., fused_if_available=True):
        super().__init__()
        self.fused_mlp_func = fused_mlp_func if fused_if_available else None
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = nn.GELU(approximate='tanh')
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop, inplace=True) if drop > 0 else nn.Identity()

    def forward(self, x):
        if self.fused_mlp_func is not None:
            return self.drop(self.fused_mlp_func(
                x=x, weight1=self.fc1.weight, weight2=self.fc2.weight, bias1=self.fc1.bias, bias2=self.fc2.bias,
                activation='gelu_approx', save_pre_act=self.training, return_residual=False, checkpoint_lvl=0,
                heuristic=0, process_group=None,
            ))
        else:
            return self.drop(self.fc2( self.act(self.fc1(x)) ))

    def extra_repr(self) -> str:
        return f'fused_mlp_func={self.fused_mlp_func is not None}'


class SelfAttention(nn.Module):
    def __init__(
        self, block_idx, embed_dim=768, num_heads=12,
        attn_drop=0., proj_drop=0., attn_l2_norm=False, flash_if_available=False,
    ):
        super().__init__()
        assert embed_dim % num_heads == 0
        self.block_idx, self.num_heads, self.head_dim = block_idx, num_heads, embed_dim // num_heads
        self.attn_l2_norm = attn_l2_norm
        if self.attn_l2_norm:
            self.scale = 1
            self.scale_mul_1H11 = nn.Parameter(torch.full(size=(1, self.num_heads, 1, 1), fill_value=4.0).log(), requires_grad=True)
            self.max_scale_mul = torch.log(torch.tensor(100)).item()
        else:
            self.scale = 0.25 / math.sqrt(self.head_dim)

        self.mat_qkv = nn.Linear(embed_dim, embed_dim * 3, bias=False)
        self.q_bias, self.v_bias = nn.Parameter(torch.zeros(embed_dim)), nn.Parameter(torch.zeros(embed_dim))
        self.register_buffer('zero_k_bias', torch.zeros(embed_dim))

        self.proj = nn.Linear(embed_dim, embed_dim)
        self.proj_drop = nn.Dropout(proj_drop, inplace=True) if proj_drop > 0 else nn.Identity()
        self.attn_drop: float = attn_drop
        self.using_flash = flash_if_available and flash_attn_func is not None
        self.using_xform = False  # xformers path kept for reference but disabled

        # KV cache for autoregressive inference
        self.caching, self.cached_k, self.cached_v = False, None, None

    def kv_caching(self, enable: bool): self.caching, self.cached_k, self.cached_v = enable, None, None

    # attn_bias is expected to be None at inference time, when KV caching is enabled
    def forward(self, x, attn_bias):
        B, L, C = x.shape

        qkv = F.linear(input=x, weight=self.mat_qkv.weight, bias=torch.cat((self.q_bias, self.zero_k_bias, self.v_bias))).view(B, L, 3, self.num_heads, self.head_dim)
        main_type = qkv.dtype
        # qkv: BL3Hc

        using_flash = self.using_flash and attn_bias is None and qkv.dtype != torch.float32
        if using_flash or self.using_xform: q, k, v = qkv.unbind(dim=2); dim_cat = 1   # q, k, v: BLHc
        else: q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(dim=0); dim_cat = 2          # q, k, v: BHLc

        if self.attn_l2_norm:
            scale_mul = self.scale_mul_1H11.clamp_max(self.max_scale_mul).exp()
            if using_flash or self.using_xform: scale_mul = scale_mul.transpose(1, 2)  # 1H11 to 11H1
            q = F.normalize(q, dim=-1).mul(scale_mul)
            k = F.normalize(k, dim=-1)

        if self.caching:
            if self.cached_k is None: self.cached_k = k; self.cached_v = v
            else: k = self.cached_k = torch.cat((self.cached_k, k), dim=dim_cat); v = self.cached_v = torch.cat((self.cached_v, v), dim=dim_cat)

        dropout_p = self.attn_drop if self.training else 0.0
        if using_flash:
            oup = flash_attn_func(q.to(dtype=main_type), k.to(dtype=main_type), v.to(dtype=main_type), dropout_p=dropout_p, softmax_scale=self.scale).view(B, L, C)
        elif self.using_xform:
            oup = memory_efficient_attention(q.to(dtype=main_type), k.to(dtype=main_type), v.to(dtype=main_type), attn_bias=None if attn_bias is None else attn_bias.to(dtype=main_type).expand(B, self.num_heads, -1, -1), p=dropout_p, scale=self.scale).view(B, L, C)
        else:
            oup = slow_attn(query=q, key=k, value=v, scale=self.scale, attn_mask=attn_bias, dropout_p=dropout_p).transpose(1, 2).reshape(B, L, C)

        return self.proj_drop(self.proj(oup))

    def extra_repr(self) -> str:
        return f'using_flash={self.using_flash}, using_xform={self.using_xform}, attn_l2_norm={self.attn_l2_norm}'
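

# Hedged usage sketch (illustrative only, never called by the model): with kv_caching(True), keys and
# values from earlier calls are concatenated along the sequence axis, so later queries attend to every
# token seen so far, mirroring stage-by-stage autoregressive decoding. Shapes are assumptions.
def _example_kv_caching():
    attn = SelfAttention(block_idx=0, embed_dim=768, num_heads=12).eval()
    attn.kv_caching(True)
    with torch.no_grad():
        y1 = attn(torch.randn(1, 1, 768), attn_bias=None)     # first stage: 1 token
        y2 = attn(torch.randn(1, 4, 768), attn_bias=None)     # next stage: 4 new tokens attend to all 5
    attn.kv_caching(False)
    return y1.shape, y2.shape
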
config = ModelArgs()


class AdaLNSelfAttn(nn.Module):
    def __init__(
        self, block_idx, last_drop_p, embed_dim, cond_dim, shared_aln: bool, norm_layer,
        num_heads, mlp_ratio=4., drop=0., attn_drop=0., drop_path=0., attn_l2_norm=False,
        flash_if_available=False, fused_if_available=True, depth=16,
    ):
        super(AdaLNSelfAttn, self).__init__()
        self.block_idx, self.last_drop_p = block_idx, last_drop_p
        self.C, self.D = embed_dim, cond_dim
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.attn = SelfAttention(block_idx=block_idx, embed_dim=embed_dim, num_heads=num_heads, attn_drop=attn_drop, proj_drop=drop, attn_l2_norm=attn_l2_norm, flash_if_available=flash_if_available)
        self.ffn = FFN(in_features=embed_dim, hidden_features=round(embed_dim * mlp_ratio), drop=drop, fused_if_available=fused_if_available)

        self.ln_wo_grad = norm_layer(embed_dim, elementwise_affine=False)
        self.shared_aln = shared_aln
        if self.shared_aln:
            self.ada_gss = nn.Parameter(torch.randn(1, 1, 6, embed_dim) / embed_dim**0.5)
        else:
            lin = nn.Linear(cond_dim, 6*embed_dim)
            self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), lin)

        self.fused_add_norm_fn = None

        # Frozen DINOv2 adapter that turns the conditioning image into tokens.
        self.adapter = Dinov2_Adapter(adapter_size=config.adapter_size, condition_type=config.condition_type)
        for p in self.adapter.model.parameters():
            p.requires_grad = False

        # Project DINOv2 features (384-d for small, 768-d for base) to the model width.
        if config.adapter_size == "small":
            self.adapter_mlp = MLP(384, config.dim, config.dim)
        elif config.adapter_size == 'base':
            self.adapter_mlp = MLP(768, config.dim, config.dim)

        self.condition_embeddings = nn.Embedding(config.vocab_size, config.dim)
        self.condition_mlp = ConditionEmbedder(config.block_size, config.dim, config.class_dropout_prob, config.block_size, config.vocab_size)

        self.condition_layers = torch.nn.ModuleList()
        for layer_id in range(3):
            self.condition_layers.append(MLP(config.dim, config.dim, config.dim))

        self.layer_internal = depth // 2
        self.control_strength = 1

        self.cross_attn_inject = CrossAttentionInjection(embed_dim=config.dim, num_heads=num_heads)

    def forward(self, x, cond_BD, condition, attn_bias, current_step: Optional[int] = None, total_steps: Optional[int] = None):
        if self.shared_aln:
            gamma1, gamma2, scale1, scale2, shift1, shift2 = (self.ada_gss + cond_BD).unbind(2)
        else:
            gamma1, gamma2, scale1, scale2, shift1, shift2 = self.ada_lin(cond_BD).view(-1, 1, 6, self.C).unbind(2)

        # Build (and cache) the condition tokens from the conditioning image.
        if condition is not None:
            condition_embeddings = self.adapter(condition)
            condition_embeddings = self.adapter_mlp(condition_embeddings)
            self.condition_token = self.condition_mlp(condition_embeddings, train=self.training)

        if getattr(self, 'condition_token', None) is not None:
            # Per-group projection of the condition tokens (currently not fed to the injection below).
            cond_feat = self.condition_layers[self.block_idx // self.layer_internal](self.condition_token)
            cond_feat = cond_feat.mean(dim=1, keepdim=True).expand(-1, x.shape[1], -1)

            cross_attn_out = self.cross_attn_inject(x, self.condition_token)

            # Cosine-decayed injection strength over the sampling schedule.
            if current_step is not None:
                progress = min(current_step / total_steps, 1.0)
                alpha = 0.5 * (1 + math.cos(math.pi * progress))
            else:
                alpha = 1.0

            x = x + alpha * cross_attn_out

        x = x + self.drop_path(self.attn( self.ln_wo_grad(x).mul(scale1.add(1)).add_(shift1), attn_bias=attn_bias ).mul_(gamma1))
        x = x + self.drop_path(self.ffn( self.ln_wo_grad(x).mul(scale2.add(1)).add_(shift2) ).mul(gamma2))
        return x

    def extra_repr(self) -> str:
        return f'shared_aln={self.shared_aln}'
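

# Hedged construction sketch (illustrative only, never called by the model): one block wired the way
# the model presumably stacks them, using config.dim as both embedding and condition width and an
# affine-free LayerNorm. It assumes the local DINOv2 checkpoint is available, that config.dim is
# divisible by num_heads, and that 256x256 conditioning images are used; all are assumptions.
def _example_adaln_block():
    from functools import partial
    block = AdaLNSelfAttn(
        block_idx=0, last_drop_p=0., embed_dim=config.dim, cond_dim=config.dim,
        shared_aln=False, norm_layer=partial(nn.LayerNorm, eps=1e-6),
        num_heads=12, depth=16,
    )
    x = torch.randn(2, 256, config.dim)          # token sequence
    cond_BD = torch.randn(2, config.dim)         # class/condition embedding for AdaLN
    seg = torch.randn(2, 3, 256, 256)            # conditioning image
    out = block(x, cond_BD, seg, attn_bias=None, current_step=0, total_steps=10)
    return out.shape
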
class AdaLNBeforeHead(nn.Module):
    def __init__(self, C, D, norm_layer):
        super().__init__()
        self.C, self.D = C, D
        self.ln_wo_grad = norm_layer(C, elementwise_affine=False)
        self.ada_lin = nn.Sequential(nn.SiLU(inplace=False), nn.Linear(D, 2*C))

    def forward(self, x_BLC: torch.Tensor, cond_BD: torch.Tensor):
        scale, shift = self.ada_lin(cond_BD).view(-1, 1, 2, self.C).unbind(2)
        return self.ln_wo_grad(x_BLC).mul(scale.add(1)).add_(shift)
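

# Hedged usage sketch (illustrative only, never called by the model): the pre-head AdaLN modulates
# the normalized features with a scale/shift predicted from the condition embedding before the logit
# head. The widths below are assumptions for illustration.
def _example_adaln_before_head():
    from functools import partial
    head_nm = AdaLNBeforeHead(C=768, D=768, norm_layer=partial(nn.LayerNorm, eps=1e-6))
    x_BLC = torch.randn(2, 256, 768)
    cond_BD = torch.randn(2, 768)
    return head_nm(x_BLC, cond_BD).shape     # (2, 256, 768)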