import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from diffusers import StableDiffusionPipeline
from PIL import Image
import io
import os
# Load the text-generation model
text_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
text_model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b-instruct",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
text_pipe = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
# Load the image-generation model
image_pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
).to("cuda")
# Per-user conversation memory
user_memory = {}
def is_prompt_image(text):
    # Simple keyword heuristic: treat the message as an image request if it
    # contains any of these Spanish drawing/image phrases
    image_keywords = ["dibuja", "imagen", "pinta", "crea una imagen", "genera una foto", "haz un dibujo", "quiero ver"]
    return any(kw in text.lower() for kw in image_keywords)
def build_prompt(user_id, message):
    personality = (
        "Eres una asistente virtual sensual, dulce y coqueta. "
        "Respondes de forma cercana, creativa y provocadora. Usa emoticonos como 😘😉🔥.\n\n"
    )
    if user_id not in user_memory:
        user_memory[user_id] = []
    user_memory[user_id].append(f"Usuario: {message}")
    # Keep only the last 5 turns as context
    user_memory[user_id] = user_memory[user_id][-5:]
    context = "\n".join(user_memory[user_id])
    return personality + context + "\nAsistente:"
def responder(input_text, user_id="usuario1"):
    if is_prompt_image(input_text):
        # Image request: generate a picture directly from the user's text
        image = image_pipe(input_text).images[0]
        return None, image
    else:
        # Text request: build the persona + memory prompt and generate a reply
        prompt = build_prompt(user_id, input_text)
        result = text_pipe(
            prompt,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.8,
            top_p=0.95,
        )[0]["generated_text"]
        reply = result.split("Asistente:")[-1].strip()
        user_memory[user_id].append(f"Asistente: {reply}")
        return reply, None
with gr.Blocks() as demo:
    with gr.Row():
        chatbot = gr.Textbox(label="Escribe algo...")
        btn = gr.Button("Enviar")
    with gr.Row():
        output_text = gr.Textbox(label="Respuesta de texto")
        output_image = gr.Image(label="Imagen generada")

    def on_click(user_input):
        text, image = responder(user_input)
        return text, image

    btn.click(fn=on_click, inputs=[chatbot], outputs=[output_text, output_image])
demo.launch()
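
# Optional smoke test (an illustrative assumption, not part of the original Space):
# calls responder() directly to exercise both the text and the image paths without
# the Gradio UI. Requires a CUDA GPU and both models downloaded; to use it, comment
# out demo.launch() above and uncomment the lines below.
# if __name__ == "__main__":
#     reply, _ = responder("Hola, ¿qué tal?")        # text path
#     print(reply)
#     _, img = responder("dibuja un atardecer")      # image path (triggered by the keyword "dibuja")
#     img.save("demo.png")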