JuanjoSG5 committed
Commit d0a1746 · 1 Parent(s): 76d4323

test: current progress with the space

Files changed (1):
  1. gradio_interface/app.py +121 -3
gradio_interface/app.py CHANGED
@@ -1,7 +1,125 @@
+ import os
  import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
+ from os import getenv
+ import base64
+ from io import BytesIO
+ from dotenv import load_dotenv
+ import requests
+ import socket
+ import logging
+
+ from langchain_openai import ChatOpenAI
+ from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+ from langchain_core.callbacks import StreamingStdOutCallbackHandler
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Load environment variables from a .env file next to this script
+ dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
+ load_dotenv(dotenv_path=dotenv_path)
+
+ # Log which credentials were found (never log the key itself)
+ logger.info(f"OPENROUTER_BASE_URL: {getenv('OPENROUTER_BASE_URL')}")
+ logger.info(f"OPENROUTER_API_KEY: {'Found' if getenv('OPENROUTER_API_KEY') else 'Missing'}")
+
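+ # A minimal .env sketch (illustrative values, not real keys):
+ #   OPENROUTER_API_KEY=sk-or-...
+ #   OPENROUTER_BASE_URL=https://openrouter.helicone.ai/api/v1
+ #   HELICONE_API_KEY=sk-helicone-...
+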
+ # Connectivity test
+ def test_connectivity(url="https://openrouter.helicone.ai/api/v1"):
+     try:
+         return requests.get(url, timeout=5).status_code == 200
+     except (requests.RequestException, socket.error) as e:
+         logger.error(f"Connectivity test failed: {e}")
+         return False
+
+ if not test_connectivity():
+     logger.warning("No network to OpenRouter; responses may fail.")
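+ # Note: some gateways answer the bare API root with 401/404, so a
+ # non-200 here does not necessarily mean the network is down.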
+
+ # Initialize the LLM with streaming output (no retry logic yet)
+ def init_llm():
+     if not test_connectivity():
+         raise RuntimeError("No connection to OpenRouter. Check network and API keys.")
+     return ChatOpenAI(
+         openai_api_key=getenv("OPENROUTER_API_KEY"),
+         openai_api_base=getenv("OPENROUTER_BASE_URL"),
+         model_name="google/gemini-flash-1.5",
+         streaming=True,
+         callbacks=[StreamingStdOutCallbackHandler()],
+         model_kwargs={
+             "extra_headers": {"Helicone-Auth": f"Bearer {getenv('HELICONE_API_KEY')}"}
+         },
+     )
+
+ llm = init_llm()
+
+ # Helpers
+ def encode_image_to_base64(pil_image):
+     # Serialize the PIL image to PNG bytes and wrap them as a data URL
+     buffer = BytesIO()
+     pil_image.save(buffer, format="PNG")
+     return f"data:image/png;base64,{base64.b64encode(buffer.getvalue()).decode()}"
+
+ # Core logic
+ def generate_response(message, chat_history, image):
+     # System prompt first, then replay the prior turns in order
+     messages = [SystemMessage(content="You are an expert image analysis assistant. Answer succinctly.")]
+     for msg in chat_history:
+         role = msg.get('role')
+         content = msg.get('content')
+         if role == 'user':
+             messages.append(HumanMessage(content=content))
+         else:
+             messages.append(AIMessage(content=content))
+     # Send the question and the image together as one multimodal message
+     encoded = encode_image_to_base64(image)
+     messages.append(HumanMessage(content=[
+         {"type": "text", "text": message},
+         {"type": "image_url", "image_url": {"url": encoded}},
+     ]))
+
+     try:
+         stream_iter = llm.stream(messages)
+         if stream_iter is None:
+             raise RuntimeError("Received no stream iterator from LLM.")
+         partial = ""
+         for chunk in stream_iter:
+             if chunk is None:
+                 logger.warning("Received None chunk from stream, skipping.")
+                 continue
+             content = getattr(chunk, 'content', None)
+             if content is None:
+                 logger.warning(f"Chunk without content: {chunk}")
+                 continue
+             # Accumulate text and yield the running total for streaming UIs
+             partial += content
+             yield partial
+     except AssertionError as e:
+         logger.error(f"AssertionError in stream: {e}")
+         yield "⚠️ No response from the model. Please retry."
+     except Exception:
+         logger.exception("Unexpected error during streaming response.")
+         yield "⚠️ Error generating a response. Please try again later."
+
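+ # With one prior exchange, `messages` is shaped roughly like:
+ #   [SystemMessage(...), HumanMessage("..."), AIMessage("..."),
+ #    HumanMessage(content=[{"type": "text", ...}, {"type": "image_url", ...}])]
+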
+ # Gradio callback: append the user turn, then stream into a placeholder reply
+ def process_message(message, chat_history, image):
+     if chat_history is None:
+         chat_history = []
+     if image is None:
+         chat_history.append({'role': 'assistant', 'content': 'Please upload an image first.'})
+         yield "", chat_history
+         return
+     chat_history.append({'role': 'user', 'content': message})
+     chat_history.append({'role': 'assistant', 'content': '⏳ Processing...'})
+     yield "", chat_history
+     # Replay only earlier turns: skip the user turn and placeholder just added
+     for chunk in generate_response(message, chat_history[:-2], image):
+         chat_history[-1]['content'] = chunk
+         yield "", chat_history
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column(scale=2):
+             chatbot = gr.Chatbot(type='messages', height=600)
+             msg = gr.Textbox(label="Message", placeholder="Type your question...")
+             clear = gr.ClearButton([msg, chatbot])
+         with gr.Column(scale=1):
+             image_input = gr.Image(type="pil", label="Upload Image")
+             info = gr.Textbox(label="Image Info", interactive=False)
+
+     msg.submit(process_message, [msg, chatbot, image_input], [msg, chatbot])
+     image_input.change(lambda img: f"Size: {img.size}" if img else "No image.", [image_input], [info])
+
  demo.launch()