Teddy-Project committed on
Commit da26ed3 · verified · 1 Parent(s): a3d2054

Update app.py

Files changed (1)
  1. app.py +36 -19
app.py CHANGED
@@ -1,19 +1,36 @@
- from transformers import pipeline
- import gradio as gr
-
- chatbot = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.1", trust_remote_code=True)
-
- def predict(user_input, history=[]):
-     prompt = user_input
-     response = chatbot(prompt, max_new_tokens=200, do_sample=True)[0]["generated_text"]
-     history.append((user_input, response))
-     return history, history
-
- gr.Interface(
-     fn=predict,
-     inputs=["text", "state"],
-     outputs=["chatbot", "state"],
-     title="Mi Chatbot Sexy",
-     description="Habla con una IA encantadora.",
-     theme="finlaymacklon/boxy_violet"
- ).launch()
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from diffusers import StableDiffusionPipeline
+ from PIL import Image
+ import io
+ import os
+
+ # Load the text model
+ text_tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
+ text_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct", device_map="auto", torch_dtype=torch.bfloat16)
+ text_pipe = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)
+
+ # Load the image model
+ image_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
+
+ # Per-user conversation memory
+ user_memory = {}
+
+ def is_prompt_image(text):
+     # Treat the message as an image request if it contains a drawing keyword
+     image_keywords = ["dibuja", "imagen", "pinta", "crea una imagen", "genera una foto", "haz un dibujo", "quiero ver"]
+     return any(kw in text.lower() for kw in image_keywords)
+
+ def build_prompt(user_id, message):
+     personality = (
+         "Eres una asistente virtual sensual, dulce y coqueta. "
+         "Respondes de forma cercana, creativa y provocadora. Usa emoticonos como 😘😉🔥.\n\n"
+     )
+     if user_id not in user_memory:
+         user_memory[user_id] = []
+
+     user_memory[user_id].append(f"Usuario: {message}")
+     # Keep only the last five turns of context
+     user_memory[user_id] = user_memory[user_id][-5:]
+
+     context = "\n".join(user_memory[user_id])
+     return personality + context + "\nAsistente:"
+
+ def responder(input_text, user_id="usuario1"):
+     if is_prompt_image(input_text):
+         image = image_pipe(input_text).images[0]
+         return None, image
+     else:
+         prompt = build_prompt(user_id, input_text)
+         result = text_pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.8, top_p=0.95)[0]["generated_text"]
+         reply = result.split("Asistente:")[-1].strip()
+         user_memory[user_id].append(f"Asistente: {reply}")
+         return reply, None
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         chatbot = gr.Textbox(label="Escribe algo...")
+         btn = gr.Button("Enviar")
+     with gr.Row():
+         output_text = gr.Textbox(label="Respuesta de texto")
+         output_image = gr.Image(label="Imagen generada")
+
+     def on_click(user_input):
+         text, image = responder(user_input)
+         return text, image
+
+     btn.click(fn=on_click, inputs=[chatbot], outputs=[output_text, output_image])
+
+ demo.launch()
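
Two caveats in the new version are worth a note; the snippets below are sketches under stated assumptions, not part of this commit.

The image pipeline is moved to "cuda" unconditionally, so the app will crash at startup on CPU-only hardware. A device-aware load could look like this (the float32 CPU fallback is an assumption):

import torch
from diffusers import StableDiffusionPipeline

# Pick the device at runtime instead of hard-coding "cuda".
device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 generally needs a GPU; assume float32 as the safe CPU fallback.
dtype = torch.float16 if device == "cuda" else torch.float32

image_pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
).to(device)

Separately, on_click() calls responder() with the default user_id="usuario1", so every visitor shares one conversation memory. One possible fix (hypothetical wiring, shown as a drop-in for the click handler inside the existing gr.Blocks) is a per-session id carried in gr.State:

import uuid

session = gr.State("")  # the initial value is copied per browser session

def on_click(user_input, session_id):
    if not session_id:
        session_id = str(uuid.uuid4())  # mint an id on the session's first message
    text, image = responder(user_input, user_id=session_id)
    return text, image, session_id

btn.click(fn=on_click, inputs=[chatbot, session], outputs=[output_text, output_image, session])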