Update app.py
app.py CHANGED
@@ -1,19 +1,36 @@
-from transformers import pipeline
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+import gradio as gr
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from diffusers import StableDiffusionPipeline
+from PIL import Image
+import io
+import os
+
+# Load the text model
+text_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
+text_model = AutoModelForCausalLM.from_pretrained(
+    "tiiuae/falcon-7b-instruct", device_map="auto", torch_dtype=torch.bfloat16
+)
+text_pipe = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
+
+# Load the image model
+image_pipe = StableDiffusionPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
+).to("cuda")
+
+# Per-user conversation memory
+user_memory = {}
+
+def is_prompt_image(text):
+    # Treat the message as an image request if it contains any drawing keyword
+    image_keywords = ["dibuja", "imagen", "pinta", "crea una imagen",
+                      "genera una foto", "haz un dibujo", "quiero ver"]
+    return any(kw in text.lower() for kw in image_keywords)
+
+def build_prompt(user_id, message):
+    personality = (
+        "Eres una asistente virtual sensual, dulce y coqueta. "
+        "Respondes de forma cercana, creativa y provocadora. Usa emoticonos como 😘😉🔥.\n\n"
+    )
+    if user_id not in user_memory:
+        user_memory[user_id] = []
+
+    user_memory[user_id].append(f"Usuario: {message}")
+    # Keep only the last 5 turns of context
+    user_memory[user_id] = user_memory[user_id][-5:]
+
+    context = "\n".join(user_memory[user_id])
+    return personality + context + "\nAsistente:"
+
+def responder(input_text, user_id="usuario1"):
+    if is_prompt_image(input_text):
+        image = image_pipe(input_text).images[0]
+        return None, image
+    else:
+        prompt = build_prompt(user_id, input_text)
+        result = text_pipe(prompt, max_new_tokens=200, do_sample=True,
+                           temperature=0.8, top_p=0.95)[0]['generated_text']
+        reply = result.split("Asistente:")[-1].strip()
+        user_memory[user_id].append(f"Asistente: {reply}")
+        return reply, None
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        chatbot = gr.Textbox(label="Escribe algo...")
+        btn = gr.Button("Enviar")
+    with gr.Row():
+        output_text = gr.Textbox(label="Respuesta de texto")
+        output_image = gr.Image(label="Imagen generada")
+
+    def on_click(user_input):
+        text, image = responder(user_input)
+        return text, image
+
+    btn.click(fn=on_click, inputs=[chatbot], outputs=[output_text, output_image])
+
+demo.launch()
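
For a quick sanity check outside the Gradio UI, here is a minimal sketch of how `responder` could be exercised once the models have loaded; the two prompts and the `gato.png` filename are hypothetical and do not appear in the commit, and the calls assume a CUDA GPU with both models in memory.

# Hypothetical smoke test (not part of the commit); assumes the models
# above loaded successfully and a CUDA GPU is available.
reply, image = responder("Hola, ¿qué tal?")           # text path: image is None
print(reply)
reply, image = responder("dibuja un gato naranja")    # image path: reply is None
image.save("gato.png")                                # save the generated PIL image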