import gradio as gr
from diffusers import DiffusionPipeline
import torch
from flask import Flask, request, jsonify
import threading
import io
import base64
import tempfile
from PIL import Image
# Load the model pipeline
pipe = DiffusionPipeline.from_pretrained(
    "HiDream-ai/HiDream-E1-Full",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")
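# A minimal sketch of a parameterized call, assuming this pipeline accepts the common
# diffusers generation kwargs (num_inference_steps, guidance_scale, generator); this is
# not verified against HiDream-E1 specifically, and the code below simply calls pipe(prompt):
#
#   result = pipe(
#       "a watercolor lighthouse at dusk",
#       num_inference_steps=28,                            # denoising steps (speed/quality trade-off)
#       guidance_scale=5.0,                                 # how strongly to follow the prompt
#       generator=torch.Generator("cuda").manual_seed(0),   # reproducible output
#   )
#   result.images[0].save("sample.png")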
# Flask app exposing the model as an HTTP API
app = Flask(__name__)

def image_to_base64(image: Image.Image) -> str:
    # Serialize a PIL image to a base64-encoded PNG string
    buffered = io.BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()
@app.route("/v1/texttoimage/completions", methods=["POST"])
def text_to_image():
    # Generate an image from the "prompt" field of the JSON body and return it as base64
    data = request.get_json(silent=True) or {}
    prompt = data.get("prompt", "").strip()
    if not prompt:
        return jsonify({"error": "Empty prompt"}), 400
    result = pipe(prompt)
    img_b64 = image_to_base64(result.images[0])
    return jsonify({"image_base64": img_b64})
# Run Flask in a separate thread; it needs its own port, since Gradio binds 7860 below
def run_flask():
    app.run(host="0.0.0.0", port=5000)

threading.Thread(target=run_flask, daemon=True).start()
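# Example client call (illustrative sketch; run it from a separate process once the app
# is up, against the port and route registered above):
#
#   import requests, base64
#   resp = requests.post(
#       "http://localhost:5000/v1/texttoimage/completions",
#       json={"prompt": "a red bicycle in the rain"},
#   )
#   with open("out.png", "wb") as f:
#       f.write(base64.b64decode(resp.json()["image_base64"]))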
# Gradio interface
def gerar_imagem_gradio(prompt):
    result = pipe(prompt)
    return result.images[0]
with gr.Blocks() as demo:
gr.Markdown("## Chat Text-to-Image com API Flask integrada") | |
gr.Markdown("API disponível em: `http://localhost:7860/v1/texttoimage/completions`") | |
    chat = gr.Chatbot()
    txt = gr.Textbox(placeholder="Type your prompt here and press Enter", show_label=False)
    def responder(prompt, chat_history):
        img = gerar_imagem_gradio(prompt)
        # Tuple-format Chatbot expects file paths, not PIL images: save to a temp PNG
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
            img.save(tmp, format="PNG")
            tmp_path = tmp.name
        chat_history = chat_history + [(prompt, (tmp_path,))]
        return chat_history, ""
    txt.submit(responder, inputs=[txt, chat], outputs=[chat, txt])
demo.launch(server_name="0.0.0.0", server_port=7860)