Omar ID EL MOUMEN commited on
Commit
8227e25
·
1 Parent(s): 93c72cb

Final version

Browse files
Files changed (8) hide show
  1. Dockerfile +13 -0
  2. README.md +4 -4
  3. app.py +195 -0
  4. index.html +361 -0
  5. mcp_client.py +37 -0
  6. requirements.txt +6 -0
  7. server.py +143 -0
  8. static/proxy_llm.js +262 -0
Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10.6

# Run as an unprivileged user (required by Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"

WORKDIR /app

# Install dependencies first so the layer is cached across code-only edits.
COPY --chown=user ./requirements.txt requirements.txt
RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host files.pythonhosted.org --no-cache-dir --upgrade -r requirements.txt

COPY --chown=user . /app
# 7860 is the port Hugging Face Spaces expects the app to listen on.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
  title: MCPSynapseChat
3
- emoji: 👀
4
- colorFrom: red
5
- colorTo: gray
6
  sdk: docker
7
  pinned: false
8
  license: gpl-3.0
9
- short_description: A MCP chatbot with Synapse LLM (intranet provider)
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: MCPSynapseChat
3
+ emoji: 🗿
4
+ colorFrom: gray
5
+ colorTo: red
6
  sdk: docker
7
  pinned: false
8
  license: gpl-3.0
9
+ short_description: An MCP chatbot that communicates with Synapse LLM
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import traceback
2
+ from fastapi import FastAPI, WebSocket
3
+ from fastapi.responses import FileResponse
4
+ import asyncio
5
+ from fastapi.staticfiles import StaticFiles
6
+ from contextlib import asynccontextmanager
7
+ import json
8
+ from fastapi import HTTPException
9
+ from pydantic import BaseModel
10
+ from fastapi.middleware.cors import CORSMiddleware
11
+ from typing import List, Optional, Any, Dict
12
+ from mcp_client import MCPClient
13
+
14
+ mcp = MCPClient()
15
+
16
class ChatMessage(BaseModel):
    """A single chat message in OpenAI chat-completions format."""
    # Sender role, e.g. "user" or "assistant"
    role: str
    # Plain-text message body
    content: str
19
+
20
class ChatCompletionRequest(BaseModel):
    """Request body mirroring the OpenAI /v1/chat/completions schema."""
    model: str = "gemini-2.5-pro-exp-03-25"
    messages: List[ChatMessage]
    # NOTE(review): pydantic deep-copies field defaults, so the mutable []
    # default is safe here (unlike a plain function default).
    tools: Optional[list] = []
    max_tokens: Optional[int] = None
25
+
26
class ChatCompletionResponseChoice(BaseModel):
    """One choice entry of an OpenAI-style chat completion response."""
    index: int = 0
    # The assistant message produced for this choice
    message: ChatMessage
    finish_reason: str = "stop"
30
+
31
class ChatCompletionResponse(BaseModel):
    """Top-level OpenAI-style /v1/chat/completions response body."""
    id: str
    object: str = "chat.completion"
    # Unix timestamp (seconds) of creation
    created: int
    model: str
    choices: List[ChatCompletionResponseChoice]
37
+
38
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook: open the MCP connection before serving requests
    and tear it down once the server stops."""
    # Startup — a failed MCP connection is non-fatal: the HTTP endpoints
    # attempt to reconnect lazily on demand.
    try:
        await mcp.connect()
        print("Connexion au MCP réussi !")
    except Exception as exc:
        print("Warning ! : Connexion au MCP impossible\n", str(exc))

    yield  # the application serves requests here

    # Shutdown — only tear down if a session was actually established.
    if mcp.session:
        try:
            await mcp.exit_stack.aclose()
            print("MCP déconnecté !")
        except Exception as exc:
            print("Erreur à la fermeture du MCP\n", str(exc))
54
+
55
# FastAPI application wired to the MCP lifespan above; /static serves the
# browser-side proxy code (proxy_llm.js).
app = FastAPI(lifespan=lifespan)
app.mount("/static", StaticFiles(directory="static"), name="static")
# Fully-open CORS: the Space is intended to be reachable from any origin.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_headers=["*"],
    allow_methods=["*"],
    allow_origins=["*"]
)
64
+
65
class ConnectionManager:
    """Registry of open WebSocket connections, each tagged with a free-form
    source label ("user", "proxy", ...), plus per-request response queues."""

    def __init__(self):
        # websocket -> source label; None until the peer identifies itself
        self.active_connections = {}
        # request_id -> asyncio.Queue used to hand a response back to a waiter
        self.response_queues = {}

    async def connect(self, websocket: WebSocket):
        """Accept the socket and register it with no source label yet."""
        await websocket.accept()
        self.active_connections[websocket] = None

    def set_source(self, websocket: WebSocket, source: str):
        """Attach a source label to an already-registered socket."""
        if websocket in self.active_connections:
            self.active_connections[websocket] = source

    async def send_to_dest(self, destination: str, message: str):
        """Forward *message* to every socket labelled *destination*."""
        # Snapshot first so concurrent (dis)connects cannot disturb iteration.
        targets = [ws for ws, label in self.active_connections.items()
                   if label == destination]
        for ws in targets:
            await ws.send_text(message)

    def remove(self, websocket: WebSocket):
        """Forget a socket; a no-op if it was never registered."""
        self.active_connections.pop(websocket, None)

    async def wait_for_response(self, request_id: str, timeout: int = 30):
        """Block until a response for *request_id* arrives, or raise
        asyncio.TimeoutError after *timeout* seconds."""
        queue = asyncio.Queue(maxsize=1)
        self.response_queues[request_id] = queue
        try:
            return await asyncio.wait_for(queue.get(), timeout=timeout)
        finally:
            # Always drop the queue so abandoned requests do not leak.
            self.response_queues.pop(request_id, None)
94
+
95
manager = ConnectionManager()  # module-level singleton shared by all endpoints
96
+
97
+ @app.get("/")
98
+ async def index_page():
99
+ return FileResponse("index.html")
100
+
101
+ # @app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
102
+ # async def chat_completions(request: ChatCompletionRequest):
103
+ # request_id = str(uuid.uuid4())
104
+ # proxy_ws = next((ws for ws, src in manager.active_connections.items() if src == "proxy"), None)
105
+ # if not proxy_ws:
106
+ # raise HTTPException(503, "Proxy client not connected !")
107
+ # user_msg = next((m for m in request.messages if m.role == "user"), None)
108
+ # if not user_msg:
109
+ # raise HTTPException(400, "No user message found !")
110
+
111
+ # proxy_msg = {
112
+ # "request_id": request_id,
113
+ # "content": user_msg.content,
114
+ # "source": "api",
115
+ # "destination": "proxy",
116
+ # "model": request.model,
117
+ # "tools": request.tools,
118
+ # "max_tokens": request.max_tokens
119
+ # }
120
+
121
+ # await proxy_ws.send_text(json.dumps(proxy_msg))
122
+
123
+ # try:
124
+ # response_content = await manager.wait_for_response(request_id)
125
+ # except asyncio.TimeoutError:
126
+ # raise HTTPException(504, "Proxy response timeout")
127
+ # return ChatCompletionResponse(
128
+ # id=request_id,
129
+ # created=int(time.time()),
130
+ # model=request.model,
131
+ # choices=[ChatCompletionResponseChoice(
132
+ # message=ChatMessage(role="assistant", content=response_content)
133
+ # )]
134
+ # )
135
+
136
class ToolCallRequest(BaseModel):
    """Body of POST /call-tools: OpenAI-style tool_call dicts to run via MCP."""
    # Each dict is expected to carry a "function" key holding "name" and a
    # JSON-encoded "arguments" string (see call_tools).
    tool_calls: List[Dict[str, Any]]
138
+
139
+ @app.get("/list-tools", response_model=List[Dict[str, Any]])
140
+ async def list_tools():
141
+ if not mcp.session:
142
+ try:
143
+ await mcp.connect()
144
+ except Exception as e:
145
+ raise HTTPException(status_code=503, detail=f"Connexion au MCP impossible !\n{str(e)}")
146
+ try:
147
+ tools = await mcp.list_tools()
148
+ return tools
149
+ except Exception as e:
150
+ raise HTTPException(status_code=500, detail=f"Erreur lors de la récupération des outils: {str(e)}")
151
+
152
+ @app.post("/call-tools")
153
+ async def call_tools(request: ToolCallRequest):
154
+ if not mcp.session:
155
+ try:
156
+ await mcp.connect()
157
+ except Exception as e:
158
+ raise HTTPException(status_code=503, detail=f"Erreur lors de la récupération des outils: {str(e)}")
159
+ try:
160
+ result_tools = []
161
+ for tool_call in request.tool_calls:
162
+ print(tool_call)
163
+ tool = tool_call["function"]
164
+ tool_name = tool["name"]
165
+ tool_args = tool["arguments"]
166
+ result = await mcp.session.call_tool(tool_name, json.loads(tool_args))
167
+ result_tools.append({
168
+ "role": "user",
169
+ "content": result.content[0].text
170
+ })
171
+ print("Finished !")
172
+ return result_tools
173
+ except Exception as e:
174
+ raise HTTPException(status_code=500, detail=f"Erreur lors de l'appel des outils: {str(e)}")
175
+
176
+
177
+
178
+
179
+ @app.websocket("/ws")
180
+ async def websocket_endpoint(websocket: WebSocket):
181
+ await manager.connect(websocket)
182
+ try:
183
+ data = await websocket.receive_text()
184
+ init_msg = json.loads(data)
185
+ if 'source' in init_msg:
186
+ manager.set_source(websocket, init_msg['source'])
187
+ print(init_msg['source'])
188
+
189
+ while True:
190
+ message = await websocket.receive_text()
191
+ msg_data = json.loads(message)
192
+ await manager.send_to_dest(msg_data["destination"], message)
193
+ except Exception as e:
194
+ manager.remove(websocket)
195
+ await websocket.close()
index.html ADDED
@@ -0,0 +1,361 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html>
3
+
4
+ <head>
5
+ <title>Chat System with LLM Proxy</title>
6
+ <style>
7
+ body {
8
+ font-family: Arial, sans-serif;
9
+ margin: 0;
10
+ padding: 20px;
11
+ }
12
+
13
+ .container {
14
+ display: flex;
15
+ gap: 20px;
16
+ height: 90vh;
17
+ }
18
+
19
+ .panel {
20
+ flex: 1;
21
+ border: 1px solid #ddd;
22
+ border-radius: 8px;
23
+ padding: 15px;
24
+ display: flex;
25
+ flex-direction: column;
26
+ }
27
+
28
+ h2 {
29
+ margin-top: 0;
30
+ border-bottom: 1px solid #eee;
31
+ padding-bottom: 10px;
32
+ }
33
+
34
+ .chat-container {
35
+ height: 300px;
36
+ overflow-y: scroll;
37
+ border: 1px solid #eee;
38
+ padding: 10px;
39
+ margin-bottom: 10px;
40
+ flex: 1;
41
+ }
42
+
43
+ .input-container {
44
+ display: flex;
45
+ gap: 10px;
46
+ }
47
+
48
+ input[type="text"],
49
+ input[type="password"] {
50
+ flex: 1;
51
+ padding: 8px;
52
+ border: 1px solid #ddd;
53
+ border-radius: 4px;
54
+ }
55
+
56
+ button {
57
+ padding: 8px 15px;
58
+ background-color: #4CAF50;
59
+ color: white;
60
+ border: none;
61
+ border-radius: 4px;
62
+ cursor: pointer;
63
+ }
64
+
65
+ button:hover {
66
+ background-color: #45a049;
67
+ }
68
+
69
+ .message {
70
+ margin-bottom: 10px;
71
+ padding: 8px;
72
+ border-radius: 8px;
73
+ }
74
+
75
+ .user-message {
76
+ background-color: #e1f5fe;
77
+ align-self: flex-end;
78
+ }
79
+
80
+ .assistant-message {
81
+ background-color: #f1f1f1;
82
+ }
83
+
84
+ .connection-status {
85
+ color: #666;
86
+ font-size: 0.9em;
87
+ margin-top: 10px;
88
+ }
89
+
90
+ .message-entry {
91
+ margin: 5px 0;
92
+ padding: 8px;
93
+ border-radius: 8px;
94
+ background: white;
95
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
96
+ font-family: monospace;
97
+ }
98
+
99
+ .incoming {
100
+ border-left: 4px solid #4CAF50;
101
+ }
102
+
103
+ .outgoing {
104
+ border-left: 4px solid #2196F3;
105
+ }
106
+
107
+ .system {
108
+ border-left: 4px solid #9C27B0;
109
+ }
110
+
111
+ .error {
112
+ border-left: 4px solid #F44336;
113
+ }
114
+
115
+ .message-header {
116
+ display: flex;
117
+ justify-content: space-between;
118
+ font-size: 0.8em;
119
+ color: #666;
120
+ margin-bottom: 4px;
121
+ }
122
+
123
+ .tabs {
124
+ display: flex;
125
+ margin-bottom: 15px;
126
+ }
127
+
128
+ .tab {
129
+ padding: 10px 20px;
130
+ cursor: pointer;
131
+ border: 1px solid #ddd;
132
+ border-radius: 4px 4px 0 0;
133
+ margin-right: 5px;
134
+ }
135
+
136
+ .tab.active {
137
+ background-color: #f1f1f1;
138
+ border-bottom: none;
139
+ }
140
+
141
+ .tab-content {
142
+ display: none;
143
+ }
144
+
145
+ .tab-content.active {
146
+ display: block;
147
+ flex: 1;
148
+ display: flex;
149
+ flex-direction: column;
150
+ }
151
+ </style>
152
+ </head>
153
+
154
+ <body>
155
+ <div class="tabs">
156
+ <div class="tab active" onclick="switchTab('chat')">Chat Client</div>
157
+ <div class="tab" onclick="switchTab('proxy')">Proxy Configuration</div>
158
+ </div>
159
+
160
+ <div class="container">
161
+ <!-- Chat Client Panel -->
162
+ <div id="chat-tab" class="tab-content active panel">
163
+ <h2>Chat Client</h2>
164
+ <div id="chat" class="chat-container"></div>
165
+ <div class="input-container">
166
+ <input id="msg" type="text" placeholder="Type your message here...">
167
+ <button onclick="sendMessage()">Send</button>
168
+ </div>
169
+ <div id="client-status" class="connection-status">Connecting...</div>
170
+ </div>
171
+
172
+ <!-- Proxy Configuration Panel -->
173
+ <div id="proxy-tab" class="tab-content panel">
174
+ <h2>LLM Proxy Configuration</h2>
175
+ <div style="margin-bottom: 20px;">
176
+ <input type="password" id="apiKey" placeholder="Enter API Key" style="width: 100%;">
177
+ <button onclick="initializeClient()" style="margin-top: 10px;">Fetch Models</button>
178
+ </div>
179
+ <select id="modelSelect" style="width: 100%; margin-bottom: 20px;"></select>
180
+ <div id="systemStatus" class="connection-status"></div>
181
+
182
+ <h3>Message Flow</h3>
183
+ <div id="messageFlow"
184
+ style="flex: 1; border: 1px solid #eee; padding: 10px; overflow-y: auto; background: #f9f9f9;">
185
+ <div style="text-align: center; color: #999; margin-bottom: 10px;">Message Flow</div>
186
+ </div>
187
+ <div id="detailedStatus" class="connection-status"></div>
188
+ </div>
189
+ </div>
190
+
191
+ <script>
192
        // Show a one-line status message in the proxy panel and mirror it
        // into the message-flow log.
        function showStatus(message, type = 'info') {
            const statusDiv = document.getElementById('systemStatus');
            statusDiv.innerHTML = `<div style="color: ${type === 'error' ? '#F44336' : '#4CAF50'}">${message}</div>`;
            addMessageEntry('system', 'system', 'proxy', message);
        }
        // Tab switching functionality: toggles the .active class on both the
        // tab header and its matching "<name>-tab" content panel.
        function switchTab(tabName) {
            document.querySelectorAll('.tab').forEach(tab => tab.classList.remove('active'));
            document.querySelectorAll('.tab-content').forEach(content => content.classList.remove('active'));

            document.querySelector(`.tab[onclick="switchTab('${tabName}')"]`).classList.add('active');
            document.getElementById(`${tabName}-tab`).classList.add('active');
        }
205
+
206
        // Client WebSocket — the "user" side of the relay. Registers itself
        // with source 'user'; incoming frames are assistant replies.
        const clientWs = new WebSocket('wss://' + window.location.host + '/ws');
        clientWs.onopen = () => {
            clientWs.send(JSON.stringify({
                source: 'user'
            }));
            document.getElementById('client-status').textContent = 'Connected';
        };
        clientWs.onclose = () => {
            document.getElementById('client-status').textContent = 'Disconnected';
        };
        clientWs.onmessage = e => {
            // Render the assistant reply in the chat pane and keep it scrolled.
            const msg = JSON.parse(e.data);
            const chatDiv = document.getElementById('chat');
            chatDiv.innerHTML += `<div class="message assistant-message">${msg.content}</div>`;
            chatDiv.scrollTop = chatDiv.scrollHeight;
        };

        // Send the text-box content to the proxy side via the relay server,
        // echo it locally, then clear the input. Empty input is ignored.
        function sendMessage() {
            const input = document.getElementById('msg');
            const content = input.value.trim();
            if (content) {
                const message = {
                    content: content,
                    source: 'user',
                    destination: 'proxy',
                    request_id: generateUUID()
                };
                clientWs.send(JSON.stringify(message));

                const chatDiv = document.getElementById('chat');
                chatDiv.innerHTML += `<div class="message user-message">${content}</div>`;
                chatDiv.scrollTop = chatDiv.scrollHeight;

                input.value = '';
            }
        }
        // Enter in the text box sends, same as the button.
        document.getElementById('msg').addEventListener('keypress', function (e) {
            if (e.key === 'Enter') {
                sendMessage();
            }
        });
248
+
249
        // Proxy WebSocket — a second connection from the same page, registered
        // with source 'proxy'. It receives relayed user messages, calls the
        // LLM (and MCP tools when requested), then sends the answer back.
        let proxyWs = new WebSocket('wss://' + window.location.host + '/ws');
        proxyWs.onopen = () => {
            proxyWs.send(JSON.stringify({
                source: 'proxy'
            }));
            showStatus('Connected to server');
        };
        proxyWs.onclose = () => {
            showStatus('Disconnected from server', 'error');
        };
        proxyWs.onmessage = async e => {
            const msg = JSON.parse(e.data);

            // Display incoming messages
            if (msg.destination === 'proxy') {
                let tools = null
                addMessageEntry('incoming', msg.source, 'proxy', msg.content);
                document.getElementById('detailedStatus').textContent = `Processing ${msg.source} request...`;

                // Fetch MCP tool schemas; running tool-less is fine on failure.
                try {
                    const response = await fetch("/list-tools");
                    tools = await response.json();
                } catch (error) {
                    console.log(`Failed to fetch tools : ${error}`);
                    tools = null;
                }

                try {
                    if (!agentClient) {
                        throw new Error(
                            "LLM client not initialized. Please enter API key and fetch models first.");
                    }

                    if (!currentModel) {
                        throw new Error("No model selected. Please select a model first.");
                    }

                    // First LLM round: user prompt plus tool schemas.
                    let llmResponse = await agentClient.call(
                        currentModel,
                        msg.content,
                        conversationHistory,
                        tools
                    );

                    conversationHistory = llmResponse.history

                    // Display outgoing response
                    addMessageEntry('outgoing', 'proxy', msg.source, llmResponse.response);

                    // If the model asked for tool calls, execute them server-side
                    // and run a second LLM round with the results appended.
                    if(llmResponse.response.tool_calls != null){
                        try {
                            console.log("Calling ....")
                            const toolCalls = await fetch("/call-tools", {
                                method: "POST",
                                headers: {
                                    "Content-Type": "application/json"
                                },
                                body: JSON.stringify({tool_calls: llmResponse.response.tool_calls})
                            })
                            const toolCallsJson = await toolCalls.json()
                            console.log("Succeed !")
                            for(const toolCall of toolCallsJson){
                                conversationHistory.push(toolCall)
                            }

                            // Second round: history only, no new prompt or tools.
                            llmResponse = await agentClient.call(
                                currentModel,
                                null,
                                conversationHistory,
                                null
                            )

                            conversationHistory = llmResponse.history
                        } catch (error) {
                            throw new Error("Error on calling tools " + error)
                        }
                    }

                    // Relay the final answer back to the original sender.
                    const responseMsg = {
                        request_id: msg.request_id,
                        content: llmResponse.response.content,
                        source: 'proxy',
                        destination: msg.source
                    };
                    proxyWs.send(JSON.stringify(responseMsg));

                    document.getElementById('detailedStatus').textContent = `Response sent to ${msg.source}`;
                } catch (error) {
                    // Any failure is reported both locally and to the sender.
                    addMessageEntry('error', 'system', 'proxy', `Error: ${error.message}`);
                    const errorResponse = {
                        request_id: msg.request_id,
                        content: `Error: ${error.message}`,
                        source: 'proxy',
                        destination: msg.source
                    };
                    proxyWs.send(JSON.stringify(errorResponse));
                }
            }
        };
349
+
350
+ function generateUUID() {
351
+ return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function (c) {
352
+ var r = Math.random() * 16 | 0,
353
+ v = c == 'x' ? r : (r & 0x3 | 0x8);
354
+ return v.toString(16);
355
+ });
356
+ }
357
+ </script>
358
+ <script src="/static/proxy_llm.js"></script>
359
+ </body>
360
+
361
+ </html>
mcp_client.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from contextlib import AsyncExitStack
3
+
4
+ from mcp import ClientSession, StdioServerParameters
5
+ from mcp.client.stdio import stdio_client
6
+ from websockets import ClientConnection
7
+
8
+
9
class MCPClient:
    """Thin wrapper around an MCP stdio client session spawned as a local
    subprocess via `uv run server.py`."""

    def __init__(self):
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()  # owns transport + session lifetimes
        self.stdio = None   # read stream of the stdio transport
        self.write = None   # write stream of the stdio transport
        # NOTE(review): `ws` is never used in this class — confirm before removing.
        self.ws: Optional[ClientConnection] = None

    async def connect(self):
        """Spawn the local MCP server over stdio and initialize a session."""
        server_params = StdioServerParameters(
            command="uv",
            args=["--directory", "/app", "run", "server.py"],
            env=None
        )

        self.stdio, self.write = await self.exit_stack.enter_async_context(
            stdio_client(server_params)
        )
        self.session = await self.exit_stack.enter_async_context(
            ClientSession(self.stdio, self.write)
        )

        await self.session.initialize()

    async def list_tools(self):
        """Return the connected server's tools as OpenAI function-schema dicts."""
        listed = await self.session.list_tools()
        return [
            {
                "name": tool.name,
                "description": tool.description,
                "parameters": tool.inputSchema,
            }
            for tool in listed.tools
        ]
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ uvicorn[standard]
2
+ fastapi
3
+ mcp[cli]
4
+ httpx
5
+ websockets
6
+ uv
server.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Literal
2
+ import httpx
3
+ import traceback
4
+ from mcp.server.fastmcp import FastMCP
5
+
6
+ # Initialize FastMCP server
7
+ mcp = FastMCP("arxiv-omar")
8
+
9
+ # Constants
10
+ CUSTOM_ARXIV_API_BASE = "https://om4r932-arxiv.hf.space"
11
+ DDG_API_BASE = "https://ychkhan-ptt-endpoints.hf.space"
12
+ API_3GPP_BASE = "https://organizedprogrammers-3gppdocfinder.hf.space"
13
+
14
+ # Helpers
15
+ async def make_request(url: str, data: dict = None) -> dict[str, Any] | None:
16
+ if data is None:
17
+ return None
18
+ headers = {
19
+ "Accept": "application/json"
20
+ }
21
+ async with httpx.AsyncClient(verify=False) as client:
22
+ try:
23
+ response = await client.post(url, headers=headers, json=data)
24
+ print(response)
25
+ response.raise_for_status()
26
+ return response.json()
27
+ except Exception as e:
28
+ traceback.print_exception(e)
29
+ return None
30
+
31
def format_search(pub_id: str, content: dict) -> str:
    """Render one arXiv search hit as a human-readable text block.

    `content` comes from the custom arXiv API and is expected to hold the
    keys "title", "authors", "date", "abstract" and "pdf".
    """
    return f"""
    arXiv publication ID : {pub_id}
    Title : {content["title"]}
    Authors : {content["authors"]}
    Release Date : {content["date"]}
    Abstract : {content["abstract"]}
    PDF link : {content["pdf"]}
    """
40
+
41
def format_extract(message: dict) -> str:
    """Render a PDF-extraction result (title + text) as a text block."""
    return f"""
    Title of PDF : {message.get("title", "No title has been found")}
    Text : {message.get("text", "No text !")}
    """
46
+
47
def format_result_search(page: dict) -> str:
    """Render one DuckDuckGo web-search result as a text block."""
    return f"""
    Title : {page.get("title", "No titles found !")}
    Little description : {page.get("body", "No description")}
    PDF url : {page.get("url", None)}
    """
53
+
54
def format_3gpp_doc_result(result: dict, release: int = None) -> str:
    """Render a 3GPP document-finder reply (doc id + URL) as a text block."""
    return f"""
    Document ID : {result.get("doc_id")}
    Release version : {release if release is not None else "Not specified"}
    URL : {result.get("url", "No URL found !")}
    """
60
+
61
+ # Tools
62
@mcp.tool()
async def get_publications(keyword: str, limit: int = 15) -> str:
    """
    Get arXiv publications based on keywords and limit of documents

    Args:
        keyword: Keywords separated by spaces
        limit: Numbers of maximum publications returned (by default, 15)
    """
    url = f"{CUSTOM_ARXIV_API_BASE}/search"
    data = await make_request(url, data={'keyword': keyword, 'limit': limit})
    # BUGFIX: test for a failed request (None) BEFORE indexing into the
    # response — the original evaluated data["error"] first and raised
    # TypeError whenever make_request returned None.
    if not data:
        return "Unable to fetch publications"
    if data["error"]:
        return data["message"]
    if len(data["message"].keys()) == 0:
        return "No publications found"

    # "message" maps publication id -> metadata dict.
    publications = [format_search(pub_id, content) for (pub_id, content) in data["message"].items()]
    return "\n--\n".join(publications)
82
+
83
@mcp.tool()
async def web_pdf_search(query: str) -> str:
    """
    Search on the Web (with DuckDuckGo search engine) to get PDF documents based on the keywords

    Args:
        query: Keywords to search documents on the Web
    """
    # Docstring above doubles as the MCP tool description — keep it stable.
    data = await make_request(f"{DDG_API_BASE}/search", data={"query": query})
    if not data:
        return "Unable to fetch results"
    if len(data["results"]) == 0:
        return "No results found"

    return "\n--\n".join(format_result_search(result) for result in data["results"])
101
+
102
@mcp.tool()
async def get_3gpp_doc_url_byID(doc_id: str, release: int = None):
    """
    Get 3GPP Technical Document URL by their document ID.

    Args:
        doc_id: Document ID (i.e. C4-125411, SP-551242, 31.101)
        release : The release version of the document (by default, None)
    """
    url = f"{API_3GPP_BASE}/find"
    data = await make_request(url, data={"doc_id": doc_id, "release": release})
    # A None reply means the HTTP call itself failed (see make_request).
    if not data:
        return "Unable to search document in 3GPP"

    return format_3gpp_doc_result(data, release)
117
+
118
@mcp.tool()
async def get_pdf_text(pdf_url: str, limit_page: int = -1) -> str:
    """
    Extract the text from the URL pointing to a PDF file

    Args:
        pdf_url: URL to a PDF document
        limit_page: How many pages the user wants to extract the content (default: -1 for all pages)
    """

    url = f"{CUSTOM_ARXIV_API_BASE}/extract_pdf/url"
    data = {"url": pdf_url}
    if limit_page != -1:
        data["page_num"] = limit_page
    data = await make_request(url, data=data)
    # BUGFIX: test for a failed request (None) BEFORE indexing into the
    # response — the original evaluated data["error"] first and raised
    # TypeError whenever make_request returned None.
    if not data:
        return "Unable to extract PDF text"
    if data["error"]:
        return data["message"]
    if len(data["message"].keys()) == 0:
        return "No text can be extracted from this PDF"

    return format_extract(data["message"])
141
+
142
if __name__ == "__main__":
    # Run as an MCP server over stdio (spawned by mcp_client.MCPClient.connect).
    mcp.run(transport="stdio")
static/proxy_llm.js ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// LLM Client Implementation
// Shared page-level state: the active client, the selected model id, and the
// running OpenAI-style message history.
let agentClient = null;
let currentModel = null;
let conversationHistory = [];

// Create the LLM client from the API key field, fetch the model list, and
// populate the model dropdown (preferring a gemini-2.5 model as default).
function initializeClient() {
    const apiKey = document.getElementById('apiKey').value;
    if (!apiKey) {
        showStatus("Please enter an API key", 'error');
        return;
    }

    agentClient = new ConversationalAgentClient(apiKey);
    agentClient.populateLLMModels()
        .then(models => {
            agentClient.updateModelSelect('modelSelect', models.find(m => m.includes("gemini-2.5")));
            currentModel = document.getElementById('modelSelect').value;
            showStatus(`Loaded ${models.length} models. Default: ${currentModel}`);
        })
        .catch(error => {
            showStatus(`Error fetching models: ${error.message}`, 'error');
        });
}
24
+
25
// Append one entry to the message-flow log. `direction` selects the CSS
// accent class (incoming / outgoing / system / error); `source` and
// `destination` label the arrow in the entry header.
function addMessageEntry(direction, source, destination, content) {
    const flowDiv = document.getElementById('messageFlow');
    const timestamp = new Date().toLocaleTimeString();

    const entry = document.createElement('div');
    entry.className = `message-entry ${direction}`;
    entry.innerHTML = `
        <div class="message-header">
            <span>${source} → ${destination}</span>
            <span>${timestamp}</span>
        </div>
        <div style="white-space: pre-wrap;">${content}</div>
    `;

    flowDiv.appendChild(entry);
    flowDiv.scrollTop = flowDiv.scrollHeight;
}
42
+
43
+ // LLM Client Classes
44
// LLM Client Classes
// Low-level OpenAI-compatible API client: model discovery, dropdown
// population, client-side rate limiting, and raw chat-completions calls.
class BaseAgentClient {
    constructor(apiKey, apiUrl = 'https://llm.synapse.thalescloud.io/v1/') {
        this.apiKey = apiKey;
        this.apiUrl = apiUrl;          // must end with '/': endpoints are appended
        this.models = [];
        this.tools = [];
        this.maxCallsPerMinute = 4;    // client-side rate limit
        this.callTimestamps = [];      // ms timestamps of recent calls
    }

    setTools(tools) {
        this.tools = tools;
    }

    // GET {apiUrl}models and return the model ids, excluding embedding and
    // image models. Throws on any HTTP or network failure.
    async fetchLLMModels() {
        if (!this.apiKey) throw new Error("API Key is not set.");
        console.log("Fetching models from:", this.apiUrl + 'models');

        try {
            const response = await fetch(this.apiUrl + 'models', {
                method: 'GET',
                headers: {
                    'Authorization': `Bearer ${this.apiKey}`
                }
            });

            if (!response.ok) {
                const errorText = await response.text();
                console.error("Fetch models error response:", errorText);
                throw new Error(`HTTP error! Status: ${response.status} - ${errorText}`);
            }

            const data = await response.json();
            console.log("Models fetched:", data.data);

            const filteredModels = data.data
                .map(model => model.id)
                .filter(id => !id.toLowerCase().includes('embed') && !id.toLowerCase().includes('image'));

            return filteredModels;
        } catch (error) {
            console.error('Error fetching LLM models:', error);
            throw new Error(`Failed to fetch models: ${error.message}`);
        }
    }

    // Fetch models and store them sorted with `defaultModel` first (the
    // default is prepended even if the server did not list it). On fetch
    // failure, falls back to [defaultModel] and re-throws.
    async populateLLMModels(defaultModel = "gemini-2.5-pro-exp-03-25") {
        try {
            const modelList = await this.fetchLLMModels();

            const sortedModels = modelList.sort((a, b) => {
                if (a === defaultModel) return -1;
                if (b === defaultModel) return 1;
                return a.localeCompare(b);
            });

            const finalModels = [];

            if (sortedModels.includes(defaultModel)) {
                finalModels.push(defaultModel);
                sortedModels.forEach(model => {
                    if (model !== defaultModel) finalModels.push(model);
                });
            } else {
                finalModels.push(defaultModel);
                finalModels.push(...sortedModels);
            }

            this.models = finalModels;
            console.log("Populated models:", this.models);
            return this.models;
        } catch (error) {
            console.error("Error populating models:", error);
            this.models = [defaultModel];
            throw error;
        }
    }

    // Rebuild the <select> options from this.models, keeping the current (or
    // given) selection. Shows a placeholder when no models were fetched yet.
    updateModelSelect(elementId = 'modelSelect', selectedModel = null) {
        const select = document.getElementById(elementId);
        if (!select) {
            console.warn(`Element ID ${elementId} not found.`);
            return;
        }

        const currentSelection = selectedModel || select.value || this.models[0];
        select.innerHTML = '';

        if (this.models.length === 0 || (this.models.length === 1 && this.models[0] === "gemini-2.5-pro-exp-03-25" && !this.apiKey)) {
            const option = document.createElement('option');
            option.value = "";
            option.textContent = "-- Fetch models first --";
            option.disabled = true;
            select.appendChild(option);
            return;
        }

        this.models.forEach(model => {
            const option = document.createElement('option');
            option.value = model;
            option.textContent = model;
            if (model === currentSelection) option.selected = true;
            select.appendChild(option);
        });

        if (!select.value && this.models.length > 0) select.value = this.models[0];
    }

    // Sliding-window limiter: if maxCallsPerMinute calls happened in the last
    // 60s, sleep until the oldest one ages out (plus a 100 ms margin).
    async rateLimitWait() {
        const currentTime = Date.now();
        this.callTimestamps = this.callTimestamps.filter(ts => currentTime - ts <= 60000);

        if (this.callTimestamps.length >= this.maxCallsPerMinute) {
            const waitTime = 60000 - (currentTime - this.callTimestamps[0]);
            const waitSeconds = Math.ceil(waitTime / 1000);
            const waitMessage = `Rate limit (${this.maxCallsPerMinute}/min) reached. Waiting ${waitSeconds}s...`;

            console.log(waitMessage);
            showStatus(waitMessage, 'warn');

            await new Promise(resolve => setTimeout(resolve, waitTime + 100));

            showStatus('Resuming after rate limit wait...', 'info');
            this.callTimestamps = this.callTimestamps.filter(ts => Date.now() - ts <= 60000);
        }
    }

    // POST {apiUrl}chat/completions and return choices[0].message.
    // Throws on HTTP errors or a malformed response body.
    async callAgent(model, messages, tools = null) {
        await this.rateLimitWait();
        const startTime = Date.now();
        console.log("Calling Agent:", model);

        let body = {
            model: model,
            messages: messages
        }

        body.tools = tools;

        try {
            const response = await fetch(this.apiUrl + 'chat/completions', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${this.apiKey}`
                },
                body: JSON.stringify(body)
            });

            const endTime = Date.now();
            this.callTimestamps.push(endTime);
            console.log(`API call took ${endTime - startTime} ms`);

            if (!response.ok) {
                const errorData = await response.json().catch(() => ({ error: { message: response.statusText } }));
                console.error("API Error:", errorData);
                throw new Error(errorData.error?.message || `API failed: ${response.status}`);
            }

            const data = await response.json();
            if (!data.choices || !data.choices[0]?.message) throw new Error("Invalid API response structure");

            console.log("API Response received.");
            return data.choices[0].message;
        } catch (error) {
            // NOTE(review): when the failure happens after the fetch resolved
            // (e.g. an HTTP error), the timestamp was already pushed above, so
            // this push counts the same call twice toward the rate limit —
            // looks unintended; confirm before changing.
            this.callTimestamps.push(Date.now());
            console.error('Error calling agent:', error);
            throw error;
        }
    }

    // Update the client-side rate limit; returns false for non-positive or
    // non-numeric input.
    setMaxCallsPerMinute(value) {
        const parsedValue = parseInt(value, 10);
        if (!isNaN(parsedValue) && parsedValue > 0) {
            console.log(`Max calls/min set to: ${parsedValue}`);
            this.maxCallsPerMinute = parsedValue;
            return true;
        }
        console.warn(`Invalid max calls/min: ${value}`);
        return false;
    }
}
226
+
227
// Conversation-level wrapper: appends the user prompt (when given) to the
// history, calls the API, and returns both the assistant message and the
// updated history.
class ConversationalAgentClient extends BaseAgentClient {
    constructor(apiKey, apiUrl = 'https://llm.synapse.thalescloud.io/v1/') {
        super(apiKey, apiUrl);
    }

    // `userPrompt` may be null for a follow-up round (e.g. after tool results
    // were pushed into the history by the caller).
    async call(model, userPrompt, conversationHistory = [], tools) {
        const messages = userPrompt ? [
            ...conversationHistory,
            { role: 'user', content: userPrompt }
        ] : [
            ...conversationHistory
        ];

        const assistantResponse = await super.callAgent(model, messages, tools);

        // NOTE(review): only {role, content} is kept here, so any tool_calls
        // field on the assistant message is dropped from the stored history —
        // the follow-up round then lacks the assistant's tool request.
        // Possibly intentional for this provider; confirm before changing.
        const updatedHistory = userPrompt ? [
            ...conversationHistory,
            { role: 'user', content: userPrompt },
            { role: assistantResponse.role, content: assistantResponse.content }
        ] : [
            ...conversationHistory,
            { role: assistantResponse.role, content: assistantResponse.content }
        ];

        return {
            response: assistantResponse,
            history: updatedHistory
        };
    }
}
257
+
258
// Model selection change handler — keeps the page-level `currentModel` in
// sync with the dropdown.
document.getElementById('modelSelect').addEventListener('change', function() {
    currentModel = this.value;
    showStatus(`Model changed to: ${currentModel}`);
});