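"""Gradio demo: text-to-image generation with FLUX.1-schnell (INT4, OpenVINO) on CPU.

Loads the OpenVINO/FLUX.1-schnell-int4-ov pipeline through optimum-intel, swaps in a T5
tokenizer for the second text encoder, and serves a single prompt-to-image interface on
port 7860.
"""
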
import gradio as gr
from optimum.intel import OVDiffusionPipeline
from transformers import AutoTokenizer
from threading import Lock
import warnings

# Silence deprecation warnings from the underlying libraries to keep the console readable.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Load the FLUX.1-schnell pipeline exported to OpenVINO with INT4 weight compression
# and compile it for CPU execution.
model_id = "OpenVINO/FLUX.1-schnell-int4-ov"
pipeline = OVDiffusionPipeline.from_pretrained(model_id, device="CPU")

# Replace the pipeline's T5 tokenizer, exposed as `tokenizer_2`, with the google/t5-v1_1-xl
# fast tokenizer used for prompt encoding.
tokenizer = AutoTokenizer.from_pretrained("google/t5-v1_1-xl", use_fast=True, add_prefix_space=True)
pipeline.tokenizer_2 = tokenizer

# Serialize access to the shared pipeline so concurrent Gradio requests do not run
# inference on the same compiled model at the same time.
lock = Lock()


def generate_image(prompt):
    # FLUX.1-schnell is distilled for few-step generation, so a couple of steps suffices.
    with lock:
        image = pipeline(prompt, num_inference_steps=2, guidance_scale=3.5).images[0]
    return image


interface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter your prompt", placeholder="e.g., A futuristic cityscape at sunset"),
    outputs=gr.Image(label="Generated Image"),
    title="FLUX.1-Schnell (OpenVINO INT4) Image Generator",
    description="Generate images from text prompts using FLUX.1-schnell optimized for CPU with OpenVINO.",
    examples=[["A serene mountain landscape"], ["A cyberpunk city at night"]],
    cache_examples=False,
)


if __name__ == "__main__":
    # Bind to all network interfaces so the app is reachable from other machines or containers.
    interface.launch(server_name="0.0.0.0", server_port=7860)
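
# Typical local run (assumes Gradio and the OpenVINO backend of Optimum are installed,
# e.g. `pip install "optimum[openvino]" gradio`):
#   python <this_script>.py
# Then open http://localhost:7860 in a browser.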