import dotenv

dotenv.load_dotenv(override=True)

import os
import subprocess

# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD avoids a
# lengthy CUDA compile. Extend os.environ instead of replacing it so the
# shell keeps PATH and the rest of its environment.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': 'TRUE'},
    shell=True,
)

import spaces
import gradio as gr
import argparse
import random
from datetime import datetime

import torch
from torchvision.transforms.functional import to_pil_image, to_tensor
from accelerate import Accelerator

from omnigen2.pipelines.omnigen2.pipeline_omnigen2 import OmniGen2Pipeline
from omnigen2.utils.img_util import create_collage
from omnigen2.schedulers.scheduling_flow_match_euler_discrete import FlowMatchEulerDiscreteScheduler
from omnigen2.schedulers.scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler

NEGATIVE_PROMPT = "(((deformed))), blurry, over saturation, bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), fused fingers, messy drawing, broken legs censor, censored, censor_bar"

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

# Populated in main() before the UI launches.
pipeline = None
accelerator = None


def load_pipeline(accelerator, weight_dtype, args):
    """Load the OmniGen2 pipeline and either enable CPU offloading or move
    it to the accelerator device."""
    pipeline = OmniGen2Pipeline.from_pretrained(
        args.model_path,
        torch_dtype=weight_dtype,
        trust_remote_code=True,
    )
    if args.enable_sequential_cpu_offload:
        pipeline.enable_sequential_cpu_offload()
    elif args.enable_model_cpu_offload:
        pipeline.enable_model_cpu_offload()
    else:
        pipeline = pipeline.to(accelerator.device)
    return pipeline
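

# On ZeroGPU Spaces, the function that touches CUDA is usually decorated with
# @spaces.GPU. The decorator is assumed here because `spaces` is imported above
# but otherwise unused; drop it when running on a dedicated GPU.
@spaces.GPU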
def run(
    prompt,
    width_input=1024,
    height_input=1024,
    scheduler='euler',
    num_inference_steps=50,
    image_input_1=None,
    image_input_2=None,
    image_input_3=None,
    negative_prompt=NEGATIVE_PROMPT,
    guidance_scale_input=5.0,
    img_guidance_scale_input=2.0,
    cfg_range_start=0.0,
    cfg_range_end=1.0,
    num_images_per_prompt=1,
    max_input_image_side_length=2048,
    max_pixels=1024 * 1024,
    seed_input=-1,
    save_images=False,
    progress=gr.Progress(),
):
""" | |
Generate and/or edit images based on text prompts and optional input images using the OmniGen2 pipeline. | |
Quick Tips for Best Results: | |
- Generate high-resolution images (at least 512x512 recommended). | |
- Be Specific: Instead of "Add bird to desk", try "Add the bird from image 1 to the desk in image 2". | |
- Use English: English prompts currently yield better results. | |
- Adjust image_guidance_scale for better consistency with the reference image: | |
- Image Editing: 1.3 - 2.0 | |
- In-context Generation: 2.0 - 3.0 | |
Args: | |
prompt: Text prompt describing the desired image generation or editing task. | |
width_input: Width of the output image in pixels (default: 1024). | |
height_input: Height of the output image in pixels (default: 1024). | |
scheduler: Scheduler type to use ('euler' or 'dpmsolver') (default: 'euler'). | |
num_inference_steps: Number of denoising steps (default: 50). | |
image_input_1: Optional first input image for editing/reference (default: None). | |
image_input_2: Optional second input image for editing/reference (default: None). | |
image_input_3: Optional third input image for editing/reference (default: None). | |
negative_prompt: Text describing what to avoid in the generation (default: NEGATIVE_PROMPT). | |
guidance_scale_input: Text guidance scale for controlling prompt adherence (default: 5.0). | |
img_guidance_scale_input: Image guidance scale for controlling input image adherence (default: 2.0). | |
cfg_range_start: Start of the classifier-free guidance range (0.0-1.0) (default: 0.0). | |
cfg_range_end: End of the classifier-free guidance range (0.0-1.0) (default: 1.0). | |
num_images_per_prompt: Number of images to generate per prompt (default: 1). | |
max_input_image_side_length: Maximum side length for input images (default: 2048). | |
max_pixels: Maximum total pixels for input images (default: 1048576). | |
seed_input: Random seed for reproducible generation (-1 for random) (default: -1). | |
progress: Gradio progress tracker. (Can be None when not ran on Gradio) | |
Returns: | |
PIL.Image: Generated image or collage of multiple generated images. | |
""" | |
    input_images = [image_input_1, image_input_2, image_input_3]
    input_images = [img for img in input_images if img is not None]
    if len(input_images) == 0:
        input_images = None

    if seed_input == -1:
        seed_input = random.randint(0, 2**16 - 1)
    generator = torch.Generator(device=accelerator.device).manual_seed(seed_input)

    def progress_callback(cur_step, timesteps):
        frac = (cur_step + 1) / float(timesteps)
        # Guard so the function also works when called outside Gradio.
        if progress is not None:
            progress(frac)

    # Swap in the requested scheduler before running the pipeline.
    if scheduler == 'euler':
        pipeline.scheduler = FlowMatchEulerDiscreteScheduler()
    elif scheduler == 'dpmsolver':
        pipeline.scheduler = DPMSolverMultistepScheduler(
            algorithm_type="dpmsolver++",
            solver_type="midpoint",
            solver_order=2,
            prediction_type="flow_prediction",
        )

    results = pipeline(
        prompt=prompt,
        input_images=input_images,
        width=width_input,
        height=height_input,
        max_input_image_side_length=max_input_image_side_length,
        max_pixels=max_pixels,
        num_inference_steps=num_inference_steps,
        max_sequence_length=1024,
        text_guidance_scale=guidance_scale_input,
        image_guidance_scale=img_guidance_scale_input,
        cfg_range=(cfg_range_start, cfg_range_end),
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_images_per_prompt,
        generator=generator,
        output_type="pil",
        step_func=progress_callback,
    )
    if progress is not None:
        progress(1.0)

    # create_collage expects tensors scaled to [-1, 1].
    vis_images = [to_tensor(image) * 2 - 1 for image in results.images]
    output_image = create_collage(vis_images)

    if save_images:
        # Create the outputs directory if it doesn't exist.
        output_dir = os.path.join(ROOT_DIR, "outputs_gradio")
        os.makedirs(output_dir, exist_ok=True)

        # Build a unique filename from the current timestamp.
        timestamp = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
        output_path = os.path.join(output_dir, f"{timestamp}.png")

        # Save the collage, plus each individual image when more than one
        # was generated.
        output_image.save(output_path)
        if len(results.images) > 1:
            for i, image in enumerate(results.images):
                image_name, ext = os.path.splitext(output_path)
                image.save(f"{image_name}_{i}{ext}")
    return output_image
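

# Minimal sketch of calling run() programmatically (assumes main() has already
# initialized the global pipeline and accelerator; progress=None is handled by
# the guards above):
#
#   image = run("A cat wearing a top hat", seed_input=42, progress=None)
#   image.save("cat.png")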
description = """ | |
### 💡 Quick Tips for Best Results (see our [github](https://github.com/VectorSpaceLab/OmniGen2?tab=readme-ov-file#-usage-tips) for more details) | |
- Image Quality: Use high-resolution images (at least 512x512 recommended). | |
- Be Specific: Instead of "Add bird to desk", try "Add the bird from image 1 to the desk in image 2". | |
- Use English: English prompts currently yield better results. | |
- Adjust image_guidance_scale for better consistency with the reference image: | |
- Image Editing: 1.3 - 2.0 | |
- In-context Generation: 2.0 - 3.0 | |
""" | |
article = """ | |
```bibtex | |
@article{wu2025omnigen2, | |
title={OmniGen2: Exploration to Advanced Multimodal Generation}, | |
author={Chenyuan Wu and Pengfei Zheng and Ruiran Yan and Shitao Xiao and Xin Luo and Yueze Wang and Wanli Li and Xiyan Jiang and Yexin Liu and Junjie Zhou and Ze Liu and Ziyi Xia and Chaofan Li and Haoge Deng and Jiahao Wang and Kun Luo and Bo Zhang and Defu Lian and Xinlong Wang and Zhongyuan Wang and Tiejun Huang and Zheng Liu}, | |
journal={arXiv preprint arXiv:2506.18871}, | |
year={2025} | |
} | |
``` | |
""" | |


def main(args):
    # Build the Gradio UI.
    with gr.Blocks() as demo:
        gr.Markdown(
            "# OmniGen2: Exploration to Advanced Multimodal Generation "
            "[paper](https://arxiv.org/abs/2506.18871) [code](https://github.com/VectorSpaceLab/OmniGen2)"
        )
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                # Text prompt
                prompt = gr.Textbox(
                    label='Enter your prompt. Use "first/second image" as reference.',
                    placeholder="Type your prompt here...",
                )
                with gr.Row(equal_height=True):
                    # Input images
                    image_input_1 = gr.Image(label="First Image", type="pil")
                    image_input_2 = gr.Image(label="Second Image", type="pil")
                    image_input_3 = gr.Image(label="Third Image", type="pil")
                generate_button = gr.Button("Generate Image")

                negative_prompt = gr.Textbox(
                    label="Enter your negative prompt",
                    placeholder="Type your negative prompt here...",
                    value=NEGATIVE_PROMPT,
                )

                # Generation settings
                with gr.Row(equal_height=True):
                    height_input = gr.Slider(
                        label="Height", minimum=256, maximum=1024, value=1024, step=128
                    )
                    width_input = gr.Slider(
                        label="Width", minimum=256, maximum=1024, value=1024, step=128
                    )
                with gr.Row(equal_height=True):
                    text_guidance_scale_input = gr.Slider(
                        label="Text Guidance Scale",
                        minimum=1.0,
                        maximum=8.0,
                        value=5.0,
                        step=0.1,
                    )
                    image_guidance_scale_input = gr.Slider(
                        label="Image Guidance Scale",
                        minimum=1.0,
                        maximum=3.0,
                        value=2.0,
                        step=0.1,
                    )
                with gr.Row(equal_height=True):
                    cfg_range_start = gr.Slider(
                        label="CFG Range Start",
                        minimum=0.0,
                        maximum=1.0,
                        value=0.0,
                        step=0.1,
                    )
                    cfg_range_end = gr.Slider(
                        label="CFG Range End",
                        minimum=0.0,
                        maximum=1.0,
                        value=1.0,
                        step=0.1,
                    )
                # Keep the CFG range ordered: moving either handle clamps the other.
                def adjust_end_slider(start_val, end_val):
                    return max(start_val, end_val)

                def adjust_start_slider(end_val, start_val):
                    return min(end_val, start_val)

                cfg_range_start.input(
                    fn=adjust_end_slider,
                    inputs=[cfg_range_start, cfg_range_end],
                    outputs=[cfg_range_end],
                )
                cfg_range_end.input(
                    fn=adjust_start_slider,
                    inputs=[cfg_range_end, cfg_range_start],
                    outputs=[cfg_range_start],
                )
                with gr.Row(equal_height=True):
                    scheduler_input = gr.Dropdown(
                        label="Scheduler",
                        choices=["euler", "dpmsolver"],
                        value="euler",
                        info="The scheduler to use for the model.",
                    )
                    num_inference_steps = gr.Slider(
                        label="Inference Steps", minimum=20, maximum=100, value=50, step=1
                    )
                with gr.Row(equal_height=True):
                    num_images_per_prompt = gr.Slider(
                        label="Number of images per prompt",
                        minimum=1,
                        maximum=4,
                        value=1,
                        step=1,
                    )
                    seed_input = gr.Slider(
                        label="Seed", minimum=-1, maximum=2147483647, value=0, step=1
                    )
                with gr.Row(equal_height=True):
                    max_input_image_side_length = gr.Slider(
                        label="max_input_image_side_length",
                        minimum=256,
                        maximum=2048,
                        value=2048,
                        step=256,
                    )
                    max_pixels = gr.Slider(
                        label="max_pixels",
                        minimum=256 * 256,
                        maximum=1536 * 1536,
                        value=1024 * 1024,
                        step=256 * 256,
                    )
            with gr.Column():
                # Output image and save option
                output_image = gr.Image(label="Output Image")
                save_images_input = gr.Checkbox(label="Save generated images", value=False)

        # Run inference in bf16 and load the pipeline once at startup.
        global accelerator
        global pipeline
        bf16 = True
        accelerator = Accelerator(mixed_precision="bf16" if bf16 else "no")
        weight_dtype = torch.bfloat16 if bf16 else torch.float32
        pipeline = load_pipeline(accelerator, weight_dtype, args)

        # Wire the generate button; input order must match run()'s signature.
        generate_button.click(
            run,
            inputs=[
                prompt,
                width_input,
                height_input,
                scheduler_input,
                num_inference_steps,
                image_input_1,
                image_input_2,
                image_input_3,
                negative_prompt,
                text_guidance_scale_input,
                image_guidance_scale_input,
                cfg_range_start,
                cfg_range_end,
                num_images_per_prompt,
                max_input_image_side_length,
                max_pixels,
                seed_input,
                save_images_input,
            ],
            outputs=output_image,
        )

        gr.Markdown(article)

    # Launch the app; mcp_server=True also exposes run() as an MCP tool.
    demo.launch(share=args.share, server_port=args.port, allowed_paths=[ROOT_DIR], mcp_server=True)


def parse_args():
    parser = argparse.ArgumentParser(description="Run the OmniGen2 Gradio app")
    parser.add_argument("--share", action="store_true", help="Share the Gradio app")
    parser.add_argument(
        "--port", type=int, default=7860, help="Port to use for the Gradio app"
    )
    parser.add_argument(
        "--model_path",
        type=str,
        default="OmniGen2/OmniGen2",
        help="Path or Hugging Face name of the model to load.",
    )
    parser.add_argument(
        "--enable_model_cpu_offload",
        action="store_true",
        help="Enable model CPU offload.",
    )
    parser.add_argument(
        "--enable_sequential_cpu_offload",
        action="store_true",
        help="Enable sequential CPU offload.",
    )
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    main(args)