jymcc committed
Commit 469ca62 · verified
1 Parent(s): 1b4c6a3

Update README.md

Files changed (1): README.md (+249 -1)
README.md CHANGED
@@ -24,7 +24,7 @@ datasets:
  </div>
 
  <div align="center">
- <a href="https://github.com/FreedomIntelligence/ShareGPT-4o-Image" target="_blank">🧰GitHub</a> | <a href="https://arxiv.org/abs/2506.18095" target="_blank">📃Paper</a> | <a href="https://arxiv.org/abs/2506.18095" target="_blank">📚ShareGPT-4o-Image</a>
+ <a href="https://github.com/FreedomIntelligence/ShareGPT-4o-Image" target="_blank">🧰GitHub</a> | <a href="https://arxiv.org/abs/2506.18095" target="_blank">📃Paper</a> | <a href="https://arxiv.org/abs/2506.18095" target="_blank">📚ShareGPT-4o-Image</a>
  </div>
 
  ## 1. Introduction
@@ -33,6 +33,254 @@ Janus-4o is a multimodal large language model (MLLM) capable of both **text-to-i
 
 
  ## 2. Quick Start
+ ### Step 1: Install the [Janus](https://github.com/deepseek-ai/Janus) Library
+ ```bash
+ git clone https://github.com/deepseek-ai/Janus.git
+ cd Janus
+ pip install -e .
+ ```
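
Installing Janus in editable mode should also pull in its Python dependencies (notably `torch` and `transformers`). Since the examples below move the model to the GPU with `.cuda()`, it can help to confirm that the package imports and that a CUDA device is visible before loading the 7B checkpoint. A minimal sanity check (an editorial sketch, not part of the original README):

```python
# Minimal environment check: an illustrative sketch, not part of the original README.
import torch
from janus.models import MultiModalityCausalLM, VLChatProcessor  # should import cleanly after `pip install -e .`

print("torch version:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())  # the snippets below call .cuda(), so this should print True
```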
+
+ ### Step 2: Inference
+ - **Text-to-Image Generation**
+ ```python
+ import os
+ import PIL.Image
+ import torch
+ import numpy as np
+ from transformers import AutoModelForCausalLM
+ from janus.models import MultiModalityCausalLM, VLChatProcessor
+
+ # Load model and processor
+ model_path = "FreedomIntelligence/Janus-4o-7B"
+ vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
+ tokenizer = vl_chat_processor.tokenizer
+ vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
+     model_path, trust_remote_code=True, torch_dtype=torch.bfloat16
+ )
+ vl_gpt = vl_gpt.cuda().eval()
+
+ # Define text-to-image generation function
+ def text_to_image_generate(input_prompt, output_path, vl_chat_processor, vl_gpt, temperature=1.0, parallel_size=2, cfg_weight=5):
+     torch.cuda.empty_cache()
+
+     conversation = [
+         {
+             "role": "<|User|>",
+             "content": input_prompt,
+         },
+         {"role": "<|Assistant|>", "content": ""},
+     ]
+
+     sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(
+         conversations=conversation,
+         sft_format=vl_chat_processor.sft_format,
+         system_prompt="",
+     )
+
+     prompt = sft_format + vl_chat_processor.image_start_tag
+
+     mmgpt = vl_gpt
+
+     image_token_num_per_image = 576
+     img_size = 384
+     patch_size = 16
+
+     with torch.inference_mode():
+         input_ids = vl_chat_processor.tokenizer.encode(prompt)
+         input_ids = torch.LongTensor(input_ids)
+
+         # Even rows hold the conditional prompt, odd rows the padded unconditional prompt for CFG
+         tokens = torch.zeros((parallel_size*2, len(input_ids)), dtype=torch.int).cuda()
+         for i in range(parallel_size*2):
+             tokens[i, :] = input_ids
+             if i % 2 != 0:
+                 tokens[i, 1:-1] = vl_chat_processor.pad_id
+
+         inputs_embeds = mmgpt.language_model.get_input_embeddings()(tokens)
+
+         generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()
+
+         # Autoregressively sample image tokens
+         for i in range(image_token_num_per_image):
+             outputs = mmgpt.language_model.model(inputs_embeds=inputs_embeds, use_cache=True, past_key_values=outputs.past_key_values if i != 0 else None)
+             hidden_states = outputs.last_hidden_state
+
+             logits = mmgpt.gen_head(hidden_states[:, -1, :])
+             logit_cond = logits[0::2, :]
+             logit_uncond = logits[1::2, :]
+
+             # Classifier-free guidance
+             logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
+             probs = torch.softmax(logits / temperature, dim=-1)
+
+             next_token = torch.multinomial(probs, num_samples=1)
+             generated_tokens[:, i] = next_token.squeeze(dim=-1)
+
+             next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)
+             img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
+             inputs_embeds = img_embeds.unsqueeze(dim=1)
+
+         # Decode the sampled image tokens back to pixels
+         dec = mmgpt.gen_vision_model.decode_code(generated_tokens.to(dtype=torch.int), shape=[parallel_size, 8, img_size//patch_size, img_size//patch_size])
+         dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
+
+         dec = np.clip((dec + 1) / 2 * 255, 0, 255)
+
+         visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
+         visual_img[:, :, :] = dec
+
+     os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True)  # ensure the output directory exists
+     output_images = []
+     for i in range(parallel_size):
+         save_path = output_path.replace('.png', '') + f'_{i}.png'
+         PIL.Image.fromarray(visual_img[i]).save(save_path)
+         output_images.append(save_path)
+     return output_images
+
+ # Run
+ prompt = "A stunning princess from Kabul in red, white traditional clothing, blue eyes, brown hair"
+ image_output_path = "./test.png"
+ text_to_image_generate(prompt, image_output_path, vl_chat_processor, vl_gpt, parallel_size=2)
+ ```
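
A few parameters of the helper above are worth noting: `parallel_size` sets how many images are sampled per call (the batch is internally doubled to hold the unconditional branch for classifier-free guidance), `cfg_weight` is the guidance scale, and `temperature` scales the sampling distribution. A usage sketch reusing the function and model loaded above (the prompt and values here are illustrative, not from the original README):

```python
# Illustrative call: reuse text_to_image_generate defined above with different settings.
paths = text_to_image_generate(
    "A watercolor painting of a lighthouse at sunrise",  # example prompt, not from the original README
    "./lighthouse.png",
    vl_chat_processor,
    vl_gpt,
    temperature=1.0,
    parallel_size=4,  # sample four candidate images in one call
    cfg_weight=5,     # classifier-free guidance scale; higher values follow the prompt more strictly
)
print(paths)  # e.g. ['./lighthouse_0.png', './lighthouse_1.png', ...]
```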
+
+
+ - **Text-and-Image-to-Image Generation**
+
+ ```python
+ import os
+ import PIL.Image
+ import torch
+ import numpy as np
+ from transformers import AutoModelForCausalLM
+ from janus.models import MultiModalityCausalLM, VLChatProcessor
+ from dataclasses import dataclass
+
+ @dataclass
+ class VLChatProcessorOutput:
+     sft_format: str
+     input_ids: torch.Tensor
+     pixel_values: torch.Tensor
+     num_image_tokens: torch.IntTensor
+
+     def __len__(self):
+         return len(self.input_ids)
+
+ def process_image(image_paths, vl_chat_processor):
+     images = [PIL.Image.open(image_path).convert("RGB") for image_path in image_paths]
+     images_outputs = vl_chat_processor.image_processor(images, return_tensors="pt")
+     return images_outputs['pixel_values']
+
+ # Load model and processor
+ model_path = "FreedomIntelligence/Janus-4o-7B"
+ vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
+ tokenizer = vl_chat_processor.tokenizer
+ vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
+     model_path, trust_remote_code=True, torch_dtype=torch.bfloat16
+ )
+ vl_gpt = vl_gpt.cuda().eval()
+
+ # Define text+image-to-image generation function
+ def text_and_image_to_image_generate(input_prompt, input_image_path, output_path, vl_chat_processor, vl_gpt, temperature=1.0, parallel_size=2, cfg_weight=5, cfg_weight2=5):
+     torch.cuda.empty_cache()
+
+     # Placeholder tokens for the input image: one image-placeholder block for the understanding encoder
+     # and one padded block that is later overwritten with generation-encoder embeddings
+     input_img_tokens = vl_chat_processor.image_start_tag + vl_chat_processor.image_tag*vl_chat_processor.num_image_tokens + vl_chat_processor.image_end_tag + vl_chat_processor.image_start_tag + vl_chat_processor.pad_tag*vl_chat_processor.num_image_tokens + vl_chat_processor.image_end_tag
+     output_img_tokens = vl_chat_processor.image_start_tag
+
+     pre_data = []
+     input_images = [input_image_path]
+     img_len = len(input_images)
+     prompts = input_img_tokens * img_len + input_prompt
+     conversation = [
+         {"role": "<|User|>", "content": prompts},
+         {"role": "<|Assistant|>", "content": ""}
+     ]
+     sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(
+         conversations=conversation,
+         sft_format=vl_chat_processor.sft_format,
+         system_prompt="",
+     )
+
+     sft_format = sft_format + output_img_tokens
+
+     mmgpt = vl_gpt
+
+     image_token_num_per_image = 576
+     img_size = 384
+     patch_size = 16
+
+     with torch.inference_mode():
+         # Encode the input image with the generation (VQ) encoder
+         input_image_pixel_values = process_image(input_images, vl_chat_processor).to(torch.bfloat16).cuda()
+         quant_input, emb_loss_input, info_input = mmgpt.gen_vision_model.encode(input_image_pixel_values)
+         image_tokens_input = info_input[2].detach().reshape(input_image_pixel_values.shape[0], -1)
+         image_embeds_input = mmgpt.prepare_gen_img_embeds(image_tokens_input)
+
+         input_ids = torch.LongTensor(vl_chat_processor.tokenizer.encode(sft_format))
+
+         # Three rows per sample: two conditional variants and one padded unconditional row for CFG
+         encoder_pixel_values = process_image(input_images, vl_chat_processor).cuda()
+         tokens = torch.zeros((parallel_size*3, len(input_ids)), dtype=torch.long)
+         for i in range(parallel_size*3):
+             tokens[i, :] = input_ids
+             if i % 3 == 2:
+                 tokens[i, 1:-1] = vl_chat_processor.pad_id
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=encoder_pixel_values, input_ids=tokens[i-2], num_image_tokens=[vl_chat_processor.num_image_tokens] * img_len))
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=encoder_pixel_values, input_ids=tokens[i-1], num_image_tokens=[vl_chat_processor.num_image_tokens] * img_len))
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=None, input_ids=tokens[i], num_image_tokens=[]))
+
+         prepare_inputs = vl_chat_processor.batchify(pre_data)
+
+         inputs_embeds = mmgpt.prepare_inputs_embeds(
+             input_ids=tokens.cuda(),
+             pixel_values=prepare_inputs['pixel_values'].to(torch.bfloat16).cuda(),
+             images_emb_mask=prepare_inputs['images_emb_mask'].cuda(),
+             images_seq_mask=prepare_inputs['images_seq_mask'].cuda()
+         )
+
+         # Overwrite the padded placeholder block of the first conditional row with the generation-encoder embeddings
+         image_gen_indices = (tokens == vl_chat_processor.image_end_id).nonzero()
+
+         for ii, ind in enumerate(image_gen_indices):
+             if ii % 4 == 0:
+                 offset = ind[1] + 2
+                 inputs_embeds[ind[0], offset: offset+image_embeds_input.shape[1], :] = image_embeds_input[(ii // 2) % img_len]
+
+         generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()
+
+         # Autoregressively sample image tokens
+         for i in range(image_token_num_per_image):
+             outputs = mmgpt.language_model.model(inputs_embeds=inputs_embeds, use_cache=True, past_key_values=outputs.past_key_values if i != 0 else None)
+             hidden_states = outputs.last_hidden_state
+
+             logits = mmgpt.gen_head(hidden_states[:, -1, :])
+             logit_cond_full = logits[0::3, :]
+             logit_cond_part = logits[1::3, :]
+             logit_uncond = logits[2::3, :]
+
+             # Blend the two conditional branches, then apply classifier-free guidance against the unconditional branch
+             logit_cond = (logit_cond_full + cfg_weight2 * logit_cond_part) / (1 + cfg_weight2)
+             logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
+             probs = torch.softmax(logits / temperature, dim=-1)
+
+             next_token = torch.multinomial(probs, num_samples=1)
+             generated_tokens[:, i] = next_token.squeeze(dim=-1)
+
+             next_token = torch.cat([next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)], dim=1).view(-1)
+             img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
+             inputs_embeds = img_embeds.unsqueeze(dim=1)
+
+         # Decode the sampled image tokens back to pixels
+         dec = mmgpt.gen_vision_model.decode_code(generated_tokens.to(dtype=torch.int), shape=[parallel_size, 8, img_size//patch_size, img_size//patch_size])
+         dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
+
+         dec = np.clip((dec + 1) / 2 * 255, 0, 255)
+
+         visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
+         visual_img[:, :, :] = dec
+
+     output_images = []
+     for i in range(parallel_size):
+         save_path = output_path.replace('.png', '') + f'_{i}.png'
+         PIL.Image.fromarray(visual_img[i]).save(save_path)
+         output_images.append(save_path)
+     return output_images
+
+ # Run
+ prompt = "Turn the image into a nighttime scene."
+ input_image_path = "./test_input.png"
+ image_output_path = "./test_output.png"
+ text_and_image_to_image_generate(prompt, input_image_path, image_output_path, vl_chat_processor, vl_gpt, parallel_size=2)
+ ```
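
The editing helper exposes two guidance weights: `cfg_weight2` sets the weighted average between the two conditional branches (with and without the injected generation-encoder embeddings of the input image), and `cfg_weight` then applies classifier-free guidance against the unconditional branch, as in the sampling loop above. A usage sketch with the function defined above (file names, instruction, and weights are illustrative only):

```python
# Illustrative call: reuse text_and_image_to_image_generate defined above.
edited = text_and_image_to_image_generate(
    "Make the sky look like a thunderstorm.",  # example instruction, not from the original README
    "./test_input.png",   # path to any local RGB image
    "./storm_edit.png",
    vl_chat_processor,
    vl_gpt,
    parallel_size=2,
    cfg_weight=5,    # guidance scale against the unconditional branch
    cfg_weight2=5,   # mixing weight between the two conditional branches (see the weighted average above)
)
print(edited)  # e.g. ['./storm_edit_0.png', './storm_edit_1.png']
```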
 
  ## Citation