import os
import warnings
from typing import List, Optional, Tuple, Union

import torch
import transformers
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers import AutoModel, AutoModelForCausalLM, GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration import NemotronH_Nano_VL_V2_Config
from .modeling_nemotron_h import NemotronHForCausalLM
from .evs import EfficientVideoSampling

logger = logging.get_logger(__name__)

"""
The following code is adapted from
https://huggingface.co/OpenGVLab/InternVL2-Llama3-76B/blob/main/modeling_internvl_chat.py.
The chat function is adapted to handle the NVLM 1-D tile-tagging design for dynamic high-resolution images.
"""


class SquaredReLU(nn.Module):
    def forward(self, x):
        return torch.pow(torch.nn.functional.relu(x), 2)


class RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        return (self.weight.to(torch.float32) * hidden_states).to(input_dtype)


def version_cmp(v1, v2, op='eq'):
    import operator

    from packaging import version

    op_func = getattr(operator, op)
    return op_func(version.parse(v1), version.parse(v2))


class NemotronH_Nano_VL_V2(PreTrainedModel):
    config_class = NemotronH_Nano_VL_V2_Config
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['NemotronHBlock']

    def __init__(self, config: NemotronH_Nano_VL_V2_Config):
        super().__init__(config)

        assert version_cmp(transformers.__version__, '4.36.2', 'ge')
        image_size = config.force_image_size
        patch_size = config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio
        self.ps_version = config.ps_version
        self.image_tag_type = config.image_tag_type
        self.img_context_token_id = config.img_context_token_id
        self.video_context_token_id = config.video_context_token_id

        logger.info(f'num_image_token: {self.num_image_token}')
        logger.info(f'ps_version: {self.ps_version}')

        self.language_model = AutoModelForCausalLM.from_config(config.llm_config, trust_remote_code=True)
        self.vision_model = AutoModel.from_config(config.vision_config, trust_remote_code=True)
        # WAR for transformers issue 38358
        self.vision_model.model._initialize_weights = self.vision_model.model._init_weights
        self.vision_model.radio_model.make_preprocessor_external()
        self.vision_model = self.vision_model.to(self.language_model.config.torch_dtype)

        self.drop_vision_class_token = True

        # Construct the vision projection.
        # Default projector: RMSNorm -> Linear -> Squared-ReLU -> Linear.
        vit_hidden_size = config.vit_hidden_size
        vision_projection_hidden_size = config.projector_hidden_size
        llm_hidden_size = config.llm_config.hidden_size
        self.video_pruning_rate = config.video_pruning_rate

        self.mlp1 = nn.Sequential(
            RMSNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, eps=1e-5),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, vision_projection_hidden_size, bias=False),
            SquaredReLU(),
            nn.Linear(vision_projection_hidden_size, llm_hidden_size, bias=False),
        )
        self.mlp1 = self.mlp1.to(self.language_model.config.torch_dtype)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_flags: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        labels: Optional[torch.LongTensor] = None,
        inputs_embeds=None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is None:
            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)

        image_flags = image_flags.squeeze(-1)

        B, N, C = inputs_embeds.shape
        inputs_embeds = inputs_embeds.reshape(B * N, C)
        input_ids = input_ids.reshape(B * N)
        selected = (input_ids == self.img_context_token_id)

        vit_batch_size = pixel_values.shape[0]
        vit_embeds = self.extract_feature(pixel_values)
        del pixel_values

        if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
            print(f'dynamic ViT batch size: {vit_batch_size}, '
                  f'images per sample: {vit_batch_size / B}, dynamic token length: {N}')

        vit_embeds = vit_embeds[image_flags == 1]

        # Scatter the projected vision features into the image-context token positions.
        try:
            inputs_embeds[selected] = inputs_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
        except Exception as e:
            # Shape mismatch (e.g. truncated sequences): fill as many image-context positions as possible.
            vit_embeds = vit_embeds.reshape(-1, C)
            print(f'warning: {e}, inputs_embeds[selected].shape={inputs_embeds[selected].shape}, '
                  f'vit_embeds.shape={vit_embeds.shape}')
            n_token = selected.sum()
            inputs_embeds[selected] = inputs_embeds[selected] * 0.0 + vit_embeds[:n_token]
        del vit_embeds

        inputs_embeds = inputs_embeds.reshape(B, N, C)

        outputs = self.language_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = outputs.logits

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def pixel_shuffle(self, x, scale_factor=0.5):
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(n, int(h * scale_factor), int(w * scale_factor), int(c / (scale_factor * scale_factor)))
        if self.ps_version == 'v1':
            warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
                          'which results in a transposed image.')
        else:
            x = x.permute(0, 2, 1, 3).contiguous()
        return x

    def extract_feature(self, pixel_values):
        vit_embeds = self.vision_model(pixel_values).features
        vit_embeds = vit_embeds.to(dtype=torch.bfloat16)
        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_values_videos: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:
        assert self.img_context_token_id is not None

        if pixel_values is not None or pixel_values_videos is not None:
            image_vit_embeds, video_vit_embeds = None, None
            if pixel_values is not None:
                pixel_values = pixel_values.to(dtype=self.vision_model.config.torch_dtype)
                image_vit_embeds = self.extract_feature(pixel_values)
            if pixel_values_videos is not None:
                pixel_values_videos = pixel_values_videos.to(dtype=self.vision_model.config.torch_dtype)
                video_vit_embeds = self.extract_feature(pixel_values_videos)

            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = inputs_embeds.shape
            inputs_embeds = inputs_embeds.reshape(B * N, C)
            input_ids_copy = input_ids.reshape(B * N)

            if image_vit_embeds is not None:
                image_mask = (input_ids_copy == self.img_context_token_id)
                assert image_mask.sum() != 0
                inputs_embeds[image_mask] = image_vit_embeds.reshape(-1, C).to(inputs_embeds.device, inputs_embeds.dtype)

            if video_vit_embeds is not None:
                if B > 1:
                    raise NotImplementedError("Video is not supported for batch size > 1")
                video_mask = (input_ids_copy == self.video_context_token_id)
                assert video_mask.sum() != 0
                inputs_embeds[video_mask] = video_vit_embeds.reshape(-1, C).to(inputs_embeds.device, inputs_embeds.dtype)

            if video_vit_embeds is not None and self.video_pruning_rate > 0:
                # EVS
                h = w = int(video_vit_embeds.shape[1] ** 0.5)  # assumption here (and everywhere else) is that shape is square
                evs_mask = EfficientVideoSampling.compute_retention_mask(
                    video_embeds=video_vit_embeds,
                    thw=(video_vit_embeds.shape[0], h, w),
                    spatial_merge_size=1,  # we already work on vision embeddings, so no downsampling to follow
                    q=self.video_pruning_rate,
                )
                print(f"pruning rate: {self.video_pruning_rate}, "
                      f"EVS mask: {evs_mask.sum().item()} tokens retained out of {evs_mask.numel()} total video tokens "
                      f"({evs_mask.sum().item() / evs_mask.numel() * 100:.2f}%)")

                retention_mask = torch.ones_like(input_ids_copy, dtype=torch.bool)
                retention_mask[video_mask] = evs_mask.view(-1)
                inputs_embeds = inputs_embeds[retention_mask].unsqueeze(0)  # adding batch=1
                if attention_mask is not None:
                    attention_mask = attention_mask[:, retention_mask].contiguous()
                if input_ids is not None:
                    input_ids = input_ids[:, retention_mask].contiguous()
            else:
                inputs_embeds = inputs_embeds.reshape(B, N, C)
        else:
            inputs_embeds = self.language_model.get_input_embeddings()(input_ids)

        # print(f"DEBUG: input_ids shape: {input_ids.shape}")
        # print(f"DEBUG: input text: {self._tokenizer.decode(input_ids[0])}")
        outputs = self.language_model.generate(
            input_ids=input_ids,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            use_cache=True,
            # return_dict_in_generate=True,
            # output_scores=True,
            **generate_kwargs,
        )

        return outputs
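
# --- Usage sketch (illustrative only) ---------------------------------------------------------
# A minimal sketch of how this class is typically driven end-to-end, assuming the checkpoint
# exposes it through `trust_remote_code` and ships its own preprocessing that produces
# `input_ids` containing image-context tokens plus matching `pixel_values`. The checkpoint path,
# the `inputs` dict, and the generation settings below are assumptions, not part of this file.
#
#   import torch
#   from transformers import AutoModel, AutoTokenizer
#
#   path = "<path-or-hub-id-of-a-NemotronH_Nano_VL_V2-checkpoint>"
#   model = AutoModel.from_pretrained(path, trust_remote_code=True,
#                                     torch_dtype=torch.bfloat16).eval().cuda()
#   tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
#
#   # `inputs` is assumed to come from the checkpoint's processor / chat template, which tiles
#   # the image and inserts the image-context placeholder tokens expected by this model.
#   output_ids = model.generate(
#       pixel_values=inputs["pixel_values"].cuda(),
#       input_ids=inputs["input_ids"].cuda(),
#       attention_mask=inputs["attention_mask"].cuda(),
#       max_new_tokens=128,  # forwarded to language_model.generate via **generate_kwargs
#   )
#   print(tokenizer.decode(output_ids[0], skip_special_tokens=True))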