import logging
import math
import os
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from diffusers.image_processor import PipelineImageInput
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor
from tqdm import tqdm

from wan.modules.posemb_layers import get_rotary_pos_embed
from wan.utils.utils import calculate_new_dimensions

from .modules.model import WanModel
from .modules.t5 import T5EncoderModel
from .modules.vae import WanVAE
from .utils.fm_solvers import (FlowDPMSolverMultistepScheduler,
                               get_sampling_sigmas, retrieve_timesteps)
from .utils.fm_solvers_unipc import FlowUniPCMultistepScheduler

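# Diffusion-forcing pipeline: a T5 text encoder, a Wan VAE and a Wan diffusion
# transformer are combined to denoise video latents block by block, optionally
# conditioned on a start image or an existing video prefix.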
class DTT2V:

    def __init__(
        self,
        config,
        checkpoint_dir,
        rank=0,
        model_filename=None,
        model_type=None,
        base_model_type=None,
        save_quantized=False,
        text_encoder_filename=None,
        quantizeTransformer=False,
        dtype=torch.bfloat16,
        VAE_dtype=torch.float32,
        mixed_precision_transformer=False,
    ):
        self.device = torch.device("cuda")
        self.config = config
        self.rank = rank
        self.dtype = dtype
        self.num_train_timesteps = config.num_train_timesteps
        self.param_dtype = config.param_dtype

        self.text_encoder = T5EncoderModel(
            text_len=config.text_len,
            dtype=config.t5_dtype,
            device=torch.device('cpu'),
            checkpoint_path=text_encoder_filename,
            tokenizer_path=os.path.join(checkpoint_dir, config.t5_tokenizer),
            shard_fn= None)

        self.vae_stride = config.vae_stride
        self.patch_size = config.patch_size 

        self.vae = WanVAE(
            vae_pth=os.path.join(checkpoint_dir, config.vae_checkpoint), dtype= VAE_dtype,
            device=self.device)

        logging.info(f"Creating WanModel from {model_filename[-1]}")
        from mmgp import offload
        # model_filename = "model.safetensors"
        # model_filename = "c:/temp/diffusion_pytorch_model-00001-of-00006.safetensors"
        base_config_file = f"configs/{base_model_type}.json"
        forcedConfigPath = base_config_file if len(model_filename) > 1 else None
        self.model = offload.fast_load_transformers_model(model_filename, modelClass=WanModel,do_quantize= quantizeTransformer, writable_tensors= False , forcedConfigPath=forcedConfigPath)
        # offload.load_model_data(self.model, "recam.ckpt")
        # self.model.cpu()
        # dtype = torch.float16
        self.model.lock_layers_dtypes(torch.float32 if mixed_precision_transformer else dtype)
        offload.change_dtype(self.model, dtype, True)
        # offload.save_model(self.model, "sky_reels2_diffusion_forcing_1.3B_mbf16.safetensors", config_file_path="config.json") 
        # offload.save_model(self.model, "sky_reels2_diffusion_forcing_720p_14B_quanto_mbf16_int8.safetensors", do_quantize= True, config_file_path="c:/temp/config _df720.json") 
        # offload.save_model(self.model, "rtfp16_int8.safetensors", do_quantize= "config.json") 

        self.model.eval().requires_grad_(False)
        if save_quantized:            
            from wgp import save_quantized_model
            save_quantized_model(self.model, model_type, model_filename[0], dtype, base_config_file)

        self.scheduler = FlowUniPCMultistepScheduler()

    @property
    def do_classifier_free_guidance(self) -> bool:
        return self._guidance_scale > 1

    def encode_image(
        self, image_start: PipelineImageInput, height: int, width: int, num_frames: int, tile_size=0, causal_block_size=0
    ) -> Tuple[List[torch.Tensor], int]:
        # Convert the start image to a single-frame video tensor in [-1, 1], then encode it with the VAE.
        prefix_video = np.array(image_start.resize((width, height))).transpose(2, 0, 1)
        prefix_video = torch.tensor(prefix_video).unsqueeze(1)  # (c, 1, h, w)
        if prefix_video.dtype == torch.uint8:
            prefix_video = (prefix_video.float() / (255.0 / 2.0)) - 1.0
        prefix_video = prefix_video.to(self.device)
        prefix_video = [self.vae.encode(prefix_video.unsqueeze(0), tile_size=tile_size)[0]]  # [(c, f, h, w)]
        if causal_block_size > 0 and prefix_video[0].shape[1] % causal_block_size != 0:
            truncate_len = prefix_video[0].shape[1] % causal_block_size
            print("the length of the prefix video is truncated for causal block size alignment.")
            prefix_video[0] = prefix_video[0][:, : prefix_video[0].shape[1] - truncate_len]
        prefix_video_latent_length = prefix_video[0].shape[1]
        return prefix_video, prefix_video_latent_length

    def prepare_latents(
        self,
        shape: Tuple[int],
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    ) -> torch.Tensor:
        return randn_tensor(shape, generator, device=device, dtype=dtype)

    def generate_timestep_matrix(
        self,
        num_frames,
        step_template,
        base_num_frames,
        ar_step=5,
        num_pre_ready=0,
        causal_block_size=1,
        shrink_interval_with_mask=False,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, list[tuple]]:
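        # Build the denoising schedule for diffusion forcing:
        #   step_matrix      - scheduler timestep per (iteration, latent frame)
        #   step_index       - index into step_template per (iteration, latent frame)
        #   step_update_mask - which frames are actually denoised at each iteration
        #   valid_interval   - (start, end) frame window processed at each iteration
        # Frames are staggered by `ar_step` so later frames lag behind earlier ones.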
        step_matrix, step_index = [], []
        update_mask, valid_interval = [], []
        num_iterations = len(step_template) + 1
        num_frames_block = num_frames // causal_block_size
        base_num_frames_block = base_num_frames // causal_block_size
        if base_num_frames_block < num_frames_block:
            infer_step_num = len(step_template)
            gen_block = base_num_frames_block
            min_ar_step = infer_step_num / gen_block
            assert ar_step >= min_ar_step, f"ar_step should be at least {math.ceil(min_ar_step)} in your setting"
        # print(num_frames, step_template, base_num_frames, ar_step, num_pre_ready, casual_block_size, num_frames_block, base_num_frames_block)
        step_template = torch.cat(
            [
                torch.tensor([999], dtype=torch.int64, device=step_template.device),
                step_template.long(),
                torch.tensor([0], dtype=torch.int64, device=step_template.device),
            ]
        )  # to handle the counter in row works starting from 1
        pre_row = torch.zeros(num_frames_block, dtype=torch.long)
        if num_pre_ready > 0:
            pre_row[: num_pre_ready // causal_block_size] = num_iterations

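        # Each loop iteration appends one row: a frame advances one step once its
        # left neighbour is fully denoised, otherwise it trails it by `ar_step` steps.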
        while not torch.all(pre_row >= (num_iterations - 1)):
            new_row = torch.zeros(num_frames_block, dtype=torch.long)
            for i in range(num_frames_block):
                if i == 0 or pre_row[i - 1] >= (
                    num_iterations - 1
                ):  # the first frame or the last frame is completely denoised
                    new_row[i] = pre_row[i] + 1
                else:
                    new_row[i] = new_row[i - 1] - ar_step
            new_row = new_row.clamp(0, num_iterations)

            update_mask.append(
                (new_row != pre_row) & (new_row != num_iterations)
            )  # False: no need to update, True: need to update
            step_index.append(new_row)
            step_matrix.append(step_template[new_row])
            pre_row = new_row

        # for long video we split into several sequences, base_num_frames is set to the model max length (for training)
        terminal_flag = base_num_frames_block
        if shrink_interval_with_mask:
            idx_sequence = torch.arange(num_frames_block, dtype=torch.int64)
            update_mask = update_mask[0]
            update_mask_idx = idx_sequence[update_mask]
            last_update_idx = update_mask_idx[-1].item()
            terminal_flag = last_update_idx + 1
        # for i in range(0, len(update_mask)):
        for curr_mask in update_mask:
            if terminal_flag < num_frames_block and curr_mask[terminal_flag]:
                terminal_flag += 1
            valid_interval.append((max(terminal_flag - base_num_frames_block, 0), terminal_flag))

        step_update_mask = torch.stack(update_mask, dim=0)
        step_index = torch.stack(step_index, dim=0)
        step_matrix = torch.stack(step_matrix, dim=0)

        if causal_block_size > 1:
            step_update_mask = step_update_mask.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous()
            step_index = step_index.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous()
            step_matrix = step_matrix.unsqueeze(-1).repeat(1, 1, causal_block_size).flatten(1).contiguous()
            valid_interval = [(s * causal_block_size, e * causal_block_size) for s, e in valid_interval]

        return step_matrix, step_index, step_update_mask, valid_interval

    @torch.no_grad()
    def generate(
        self,
        input_prompt: Union[str, List[str]],
        n_prompt: Union[str, List[str]] = "",
        image_start: PipelineImageInput = None,
        input_video=None,
        height: int = 480,
        width: int = 832,
        fit_into_canvas=True,
        frame_num: int = 97,
        sampling_steps: int = 50,
        shift: float = 1.0,
        guide_scale: float = 5.0,
        seed: int = 0,
        overlap_noise: int = 0,
        ar_step: int = 5,
        causal_block_size: int = 5,
        causal_attention: bool = True,
        fps: int = 24,
        VAE_tile_size=0,
        joint_pass=False,
        slg_layers=None,
        slg_start=0.0,
        slg_end=1.0,
        callback=None,
        **bbargs
    ):
        self._interrupt = False
        generator = torch.Generator(device=self.device)
        generator.manual_seed(seed)
        self._guidance_scale = guide_scale
        # round the requested frame count to 17 + 20k so the latent length is a multiple of the causal block size (5)
        frame_num = max(17, frame_num)
        frame_num = int(round((frame_num - 17) / 20) * 20 + 17)

        if ar_step == 0: 
            causal_block_size = 1
            causal_attention = False

        i2v_extra_kwrags = {}
        prefix_video = None
        predix_video_latent_length = 0

        if input_video is not None:
            _, _, height, width = input_video.shape
        elif image_start is not None:
            frame_width, frame_height = image_start.size
            height, width = calculate_new_dimensions(height, width, frame_height, frame_width, fit_into_canvas)
            image_start = np.array(image_start.resize((width, height))).transpose(2, 0, 1)


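        # latent grid sizes assume the Wan VAE strides: 4x temporal, 8x spatial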
        latent_length = (frame_num - 1) // 4 + 1
        latent_height = height // 8
        latent_width = width // 8

        if self._interrupt:
            return None
        prompt_embeds = self.text_encoder([input_prompt], self.device)[0]
        prompt_embeds  = prompt_embeds.to(self.dtype).to(self.device)
        if self.do_classifier_free_guidance:
            negative_prompt_embeds = self.text_encoder([n_prompt], self.device)[0]
            negative_prompt_embeds  = negative_prompt_embeds.to(self.dtype).to(self.device)

        if self._interrupt:
            return None

        self.scheduler.set_timesteps(sampling_steps, device=self.device, shift=shift)
        init_timesteps = self.scheduler.timesteps
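        # map the fps to the embedding index expected by the model (0 for 16 fps, 1 otherwise)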
        fps_embeds = [fps] #* prompt_embeds[0].shape[0]
        fps_embeds = [0 if i == 16 else 1 for i in fps_embeds]


        output_video = input_video

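        # Encode the start image or the provided video into prefix latents that stay
        # fixed during sampling and condition the remaining frames.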
        if image_start is not None or output_video is not None:
            if output_video is not None:
                prefix_video = output_video.to(self.device)
            else:
                causal_block_size = 1
                causal_attention = False
                ar_step = 0
                prefix_video = image_start
                prefix_video = torch.tensor(prefix_video).unsqueeze(1)  # .to(image_embeds.dtype).unsqueeze(1)
                if prefix_video.dtype == torch.uint8:
                    prefix_video = (prefix_video.float() / (255.0 / 2.0)) - 1.0
                prefix_video = prefix_video.to(self.device)
            prefix_video = self.vae.encode(prefix_video.unsqueeze(0))[0]  # [(c, f, h, w)]
            predix_video_latent_length = prefix_video.shape[1]
            truncate_len = predix_video_latent_length % causal_block_size
            if truncate_len != 0:
                if truncate_len == predix_video_latent_length:
                    causal_block_size = 1
                    causal_attention = False
                    ar_step = 0
                else:
                    print("the length of prefix video is truncated for the casual block size alignment.")
                    predix_video_latent_length -= truncate_len
                    prefix_video = prefix_video[:, : predix_video_latent_length]

        base_num_frames_iter = latent_length
        latent_shape = [16, base_num_frames_iter, latent_height, latent_width]
        latents = self.prepare_latents(
            latent_shape, dtype=torch.float32, device=self.device, generator=generator
        )
        if prefix_video is not None:
            latents[:, :predix_video_latent_length] = prefix_video.to(torch.float32)
        step_matrix, _, step_update_mask, valid_interval = self.generate_timestep_matrix(
            base_num_frames_iter,
            init_timesteps,
            base_num_frames_iter,
            ar_step,
            predix_video_latent_length,
            causal_block_size,
        )
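        # Frames are denoised asynchronously, so each latent frame gets its own scheduler instance.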
        sample_schedulers = []
        for _ in range(base_num_frames_iter):
            sample_scheduler = FlowUniPCMultistepScheduler(
                num_train_timesteps=1000, shift=1, use_dynamic_shifting=False
            )
            sample_scheduler.set_timesteps(sampling_steps, device=self.device, shift=shift)
            sample_schedulers.append(sample_scheduler)
        sample_schedulers_counter = [0] * base_num_frames_iter

        updated_num_steps = len(step_matrix)
        if callback is not None:
            callback(-1, None, True, override_num_inference_steps=updated_num_steps)
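        # When the transformer residual cache is enabled, precompute the per-step timesteps
        # so the cache skip threshold can be calibrated up front.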
        if self.model.enable_cache:
            x_count = 2 if self.do_classifier_free_guidance else 1
            self.model.previous_residual = [None] * x_count 
            time_steps_comb = []
            self.model.num_steps = updated_num_steps
            for i, timestep_i in enumerate(step_matrix):
                valid_interval_start, valid_interval_end = valid_interval[i]
                timestep = timestep_i[None, valid_interval_start:valid_interval_end].clone()
                if overlap_noise > 0 and valid_interval_start < predix_video_latent_length:
                    timestep[:, valid_interval_start:predix_video_latent_length] = overlap_noise
                time_steps_comb.append(timestep)
            self.model.compute_teacache_threshold(self.model.cache_start_step, time_steps_comb, self.model.teacache_multiplier)
            del time_steps_comb
        from mmgp import offload
        freqs = get_rotary_pos_embed(latents.shape[1:], enable_RIFLEx=False)  # rotary position embeddings for the latent (f, h, w) grid
        kwrags = {
            "freqs" :freqs,
            "fps" : fps_embeds,
            "causal_block_size" : causal_block_size,
            "causal_attention" : causal_attention,
            "callback" : callback,
            "pipeline" : self,
        }   
        kwrags.update(i2v_extra_kwrags)

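        # Main denoising loop: one pass per row of the timestep matrix.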
        for i, timestep_i in enumerate(tqdm(step_matrix)):
            kwrags["slg_layers"] = slg_layers if int(slg_start * updated_num_steps) <= i < int(slg_end * updated_num_steps) else None

            offload.set_step_no_for_lora(self.model, i)
            update_mask_i = step_update_mask[i]
            valid_interval_start, valid_interval_end = valid_interval[i]
            timestep = timestep_i[None, valid_interval_start:valid_interval_end].clone()
            latent_model_input = latents[:, valid_interval_start:valid_interval_end, :, :].clone()
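            # Lightly re-noise the prefix latents so their effective timestep matches `overlap_noise`.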
            if overlap_noise > 0 and valid_interval_start < predix_video_latent_length:
                noise_factor = 0.001 * overlap_noise
                timestep_for_noised_condition = overlap_noise
                latent_model_input[:, valid_interval_start:predix_video_latent_length] = (
                    latent_model_input[:, valid_interval_start:predix_video_latent_length]
                    * (1.0 - noise_factor)
                    + torch.randn_like(
                        latent_model_input[:, valid_interval_start:predix_video_latent_length]
                    )
                    * noise_factor
                )
                timestep[:, valid_interval_start:predix_video_latent_length] = timestep_for_noised_condition
            kwrags.update({
                "t" : timestep,
                "current_step" : i,                 
                })

            # with torch.autocast(device_type="cuda"):                
            if True:
                if not self.do_classifier_free_guidance:
                    noise_pred = self.model(
                        x=[latent_model_input],
                        context=[prompt_embeds],
                        **kwrags,
                    )[0]
                    if self._interrupt:
                        return None
                    noise_pred= noise_pred.to(torch.float32)                                                                  
                else:
                    if joint_pass:
                        noise_pred_cond, noise_pred_uncond = self.model(
                            x=[latent_model_input, latent_model_input],
                            context= [prompt_embeds, negative_prompt_embeds],
                            **kwrags,
                        )
                        if self._interrupt:
                            return None                
                    else:
                        noise_pred_cond = self.model(
                            x=[latent_model_input],
                            x_id=0,
                            context=[prompt_embeds],
                            **kwrags,
                        )[0]
                        if self._interrupt:
                            return None                
                        noise_pred_uncond = self.model(
                            x=[latent_model_input],
                            x_id=1,
                            context=[negative_prompt_embeds],
                            **kwrags,
                        )[0]
                        if self._interrupt:
                            return None
                    noise_pred = noise_pred_uncond + guide_scale * (noise_pred_cond - noise_pred_uncond)
                    del noise_pred_cond, noise_pred_uncond
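            # Advance only the frames flagged by the update mask, each with its own scheduler.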
            for idx in range(valid_interval_start, valid_interval_end):
                if update_mask_i[idx].item():
                    latents[:, idx] = sample_schedulers[idx].step(
                        noise_pred[:, idx - valid_interval_start],
                        timestep_i[idx],
                        latents[:, idx],
                        return_dict=False,
                        generator=generator,
                    )[0]
                    sample_schedulers_counter[idx] += 1
            if callback is not None:
                callback(i, latents.squeeze(0), False)         

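        # Decode the finished latents back to pixel space with the VAE.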
        x0 = latents.unsqueeze(0)
        videos = [self.vae.decode(x0, tile_size= VAE_tile_size)[0]]
        output_video = videos[0].clamp(-1, 1).cpu()  # c, f, h, w
        return output_video