Almaatla committed (verified)
Commit 60ce19c · 1 Parent(s): 0bb9e0a

Update app.py

Files changed (1): app.py (+123 −1)

app.py CHANGED
@@ -147,7 +147,129 @@ async def get_proxy():
  }
  </style>

- <script>
+ <script>
+ let agentClient = null;
+ let currentModel = null;
+ const systemPrompt = "You are a helpful AI assistant. Respond concisely and accurately.";
+ const conversationHistory = [];
+ // Status helper: render a message in the status panel and log it to the message flow
+ function showStatus(message, type = 'info') {
+     const statusDiv = document.getElementById('systemStatus');
+     statusDiv.innerHTML = `<div style="color: ${type === 'error' ? '#F44336' : '#4CAF50'}">${message}</div>`;
+     addMessageEntry('system', 'system', 'proxy', message);
+ }
+ // Model initialization: create the client and load the model list
+ function initializeClient() {
+     const apiKey = document.getElementById('apiKey').value;
+     if (!apiKey) {
+         showStatus("Please enter an API key", 'error');
+         return;
+     }
+
+     agentClient = new ConversationalAgentClient(apiKey);
+     agentClient.populateLLMModels()
+         .then(models => {
+             agentClient.updateModelSelect('modelSelect', models.find(m => m.includes("gemini-2.5")));
+             currentModel = document.getElementById('modelSelect').value;
+             showStatus(`Loaded ${models.length} models. Default: ${currentModel}`);
+         })
+         .catch(error => {
+             showStatus(`Error fetching models: ${error.message}`, 'error');
+         });
+ }
+
+ // Model selection change handler
+ document.getElementById('modelSelect').addEventListener('change', function() {
+     currentModel = this.value;
+     showStatus(`Model changed to: ${currentModel}`);
+ });
+
+ // --- API Client Classes (BaseAgentClient, ConversationalAgentClient) ---
+ // Note: rateLimitWait() expects a showGenerationStatus() helper that is not part of this diff.
+ class BaseAgentClient {
+     constructor(apiKey, apiUrl = 'https://llm.synapse.thalescloud.io/v1/') { this.apiKey = apiKey; this.apiUrl = apiUrl; this.models = []; this.maxCallsPerMinute = 4; this.callTimestamps = []; }
+     async fetchLLMModels() { if (!this.apiKey) throw new Error("API Key is not set."); console.log("Fetching models from:", this.apiUrl + 'models'); try { const response = await fetch(this.apiUrl + 'models', { method: 'GET', headers: { 'Authorization': `Bearer ${this.apiKey}` } }); if (!response.ok) { const errorText = await response.text(); console.error("Fetch models error response:", errorText); throw new Error(`HTTP error! Status: ${response.status} - ${errorText}`); } const data = await response.json(); console.log("Models fetched:", data.data); const filteredModels = data.data.map(model => model.id).filter(id => !id.toLowerCase().includes('embed') && !id.toLowerCase().includes('image')); return filteredModels; } catch (error) { console.error('Error fetching LLM models:', error); throw new Error(`Failed to fetch models: ${error.message}`); } }
+     async populateLLMModels(defaultModel = "gemini-2.5-pro-exp-03-25") { try { const modelList = await this.fetchLLMModels(); const sortedModels = modelList.sort((a, b) => { if (a === defaultModel) return -1; if (b === defaultModel) return 1; return a.localeCompare(b); }); const finalModels = []; if (sortedModels.includes(defaultModel)) { finalModels.push(defaultModel); sortedModels.forEach(model => { if (model !== defaultModel) finalModels.push(model); }); } else { finalModels.push(defaultModel); finalModels.push(...sortedModels); } this.models = finalModels; console.log("Populated models:", this.models); return this.models; } catch (error) { console.error("Error populating models:", error); this.models = [defaultModel]; throw error; } }
+     updateModelSelect(elementId = 'modelSelect', selectedModel = null) { const select = document.getElementById(elementId); if (!select) { console.warn(`Element ID ${elementId} not found.`); return; } const currentSelection = selectedModel || select.value || this.models[0]; select.innerHTML = ''; if (this.models.length === 0 || (this.models.length === 1 && this.models[0] === "gemini-2.5-pro-exp-03-25" && !this.apiKey)) { const option = document.createElement('option'); option.value = ""; option.textContent = "-- Fetch models first --"; option.disabled = true; select.appendChild(option); return; } this.models.forEach(model => { const option = document.createElement('option'); option.value = model; option.textContent = model; if (model === currentSelection) option.selected = true; select.appendChild(option); }); if (!select.value && this.models.length > 0) select.value = this.models[0]; }
+     async rateLimitWait() { const currentTime = Date.now(); this.callTimestamps = this.callTimestamps.filter(ts => currentTime - ts <= 60000); if (this.callTimestamps.length >= this.maxCallsPerMinute) { const waitTime = 60000 - (currentTime - this.callTimestamps[0]); const waitSeconds = Math.ceil(waitTime / 1000); const waitMessage = `Rate limit (${this.maxCallsPerMinute}/min) reached. Waiting ${waitSeconds}s...`; console.log(waitMessage); showGenerationStatus(waitMessage, 'warn'); await new Promise(resolve => setTimeout(resolve, waitTime + 100)); showGenerationStatus('Resuming after rate limit wait...', 'info'); this.callTimestamps = this.callTimestamps.filter(ts => Date.now() - ts <= 60000); } }
+     async callAgent(model, messages, temperature = 0.7) { await this.rateLimitWait(); const startTime = Date.now(); console.log("Calling Agent:", model); try { const response = await fetch(this.apiUrl + 'chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${this.apiKey}` }, body: JSON.stringify({ model: model, messages: messages, temperature: temperature }) }); const endTime = Date.now(); this.callTimestamps.push(endTime); console.log(`API call took ${endTime - startTime} ms`); if (!response.ok) { const errorData = await response.json().catch(() => ({ error: { message: response.statusText } })); console.error("API Error:", errorData); throw new Error(errorData.error?.message || `API failed: ${response.status}`); } const data = await response.json(); if (!data.choices || !data.choices[0]?.message) throw new Error("Invalid API response structure"); console.log("API Response received."); return data.choices[0].message.content; } catch (error) { this.callTimestamps.push(Date.now()); console.error('Error calling agent:', error); throw error; } }
+     setMaxCallsPerMinute(value) { const parsedValue = parseInt(value, 10); if (!isNaN(parsedValue) && parsedValue > 0) { console.log(`Max calls/min set to: ${parsedValue}`); this.maxCallsPerMinute = parsedValue; return true; } console.warn(`Invalid max calls/min: ${value}`); return false; }
+ }
+ class ConversationalAgentClient extends BaseAgentClient {
+     constructor(apiKey, apiUrl = 'https://llm.synapse.thalescloud.io/v1/') {
+         super(apiKey, apiUrl);
+     }
+
+     // Single conversational turn: returns the reply plus the updated history.
+     async call(model, userPrompt, systemPrompt, conversationHistory = [], temperature = 0.7) {
+         const messages = [
+             { role: 'system', content: systemPrompt },
+             ...conversationHistory,
+             { role: 'user', content: userPrompt }
+         ];
+
+         const assistantResponse = await super.callAgent(model, messages, temperature);
+
+         const updatedHistory = [
+             ...conversationHistory,
+             { role: 'user', content: userPrompt },
+             { role: 'assistant', content: assistantResponse }
+         ];
+
+         return {
+             response: assistantResponse,
+             history: updatedHistory
+         };
+     }
+
+     // Same as call(), but prepends selected code versions to the user prompt.
+     async callWithCodeContext(
+         model,
+         userPrompt,
+         systemPrompt,
+         selectedCodeVersionsData = [],
+         conversationHistory = [],
+         temperature = 0.7
+     ) {
+         let codeContext = "";
+         let fullSystemPrompt = systemPrompt || "";
+
+         if (selectedCodeVersionsData && selectedCodeVersionsData.length > 0) {
+             codeContext = `Code context (chronological):\n\n`;
+
+             selectedCodeVersionsData.forEach((versionData, index) => {
+                 if (versionData && typeof versionData.code === 'string') {
+                     codeContext += `--- Part ${index + 1} (${versionData.version || '?'}) ---\n`;
+                     codeContext += `${versionData.code}\n\n`;
+                 } else {
+                     console.warn(`Invalid context version data at index ${index}`);
+                 }
+             });
+
+             codeContext += `-------- end context ---\n\nUser request based on context:\n\n`;
+         }
+
+         const fullPrompt = codeContext + userPrompt;
+
+         const messages = [
+             { role: 'system', content: fullSystemPrompt },
+             ...conversationHistory,
+             { role: 'user', content: fullPrompt }
+         ];
+
+         const assistantResponse = await super.callAgent(model, messages, temperature);
+
+         const updatedHistory = [
+             ...conversationHistory,
+             { role: 'user', content: fullPrompt },
+             { role: 'assistant', content: assistantResponse }
+         ];
+
+         return {
+             response: assistantResponse,
+             history: updatedHistory
+         };
+     }
+ }
+
+
  function addMessageEntry(direction, source, destination, content) {
      const flowDiv = document.getElementById('messageFlow');
      const timestamp = new Date().toLocaleTimeString();
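
The rateLimitWait() method above is minified; its sliding-window logic, restated for readability (an equivalent sketch with the status reporting omitted, not the committed code):

    // Inside BaseAgentClient: keep timestamps of recent calls and wait
    // until fewer than maxCallsPerMinute fall inside the last 60 seconds.
    async rateLimitWait() {
        const now = Date.now();
        this.callTimestamps = this.callTimestamps.filter(ts => now - ts <= 60000);
        if (this.callTimestamps.length >= this.maxCallsPerMinute) {
            // Sleep until the oldest timestamp ages out of the window.
            const waitTime = 60000 - (now - this.callTimestamps[0]);
            await new Promise(resolve => setTimeout(resolve, waitTime + 100));
            this.callTimestamps = this.callTimestamps.filter(ts => Date.now() - ts <= 60000);
        }
    }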
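
How the conversational flow threads history, as a minimal usage sketch (not part of the commit): it assumes the script above is loaded in the page, that YOUR_API_KEY is valid for the configured endpoint, that the model id exists there, and that it runs where top-level await is available (a module script or the dev console); the prompts are illustrative.

    // Each call() returns { response, history }; pass the returned
    // history back in so earlier turns are replayed to the model.
    const client = new ConversationalAgentClient('YOUR_API_KEY');
    let history = [];

    const turn1 = await client.call(
        'gemini-2.5-pro-exp-03-25',
        'What does HTTP status 429 mean?',
        systemPrompt,
        history
    );
    history = turn1.history;             // now [user, assistant]
    console.log(turn1.response);

    const turn2 = await client.call(
        'gemini-2.5-pro-exp-03-25',
        'And how should a client react to it?',
        systemPrompt,
        history                          // previous turns go into the next request
    );
    console.log(turn2.history.length);   // 4 entries: two full turns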
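
callWithCodeContext() builds the same message list but inlines selected code versions ahead of the user prompt. A hedged sketch of the expected input shape, under the same assumptions as above (the version labels and code strings are invented for illustration):

    const versions = [
        { version: 'v1', code: 'function add(a, b) { return a + b; }' },
        { version: 'v2', code: 'const add = (a, b) => a + b;' }
    ];

    // Each { version, code } pair becomes a "--- Part N (vX) ---" section
    // prepended to the user prompt; entries without a string code are skipped.
    const result = await client.callWithCodeContext(
        'gemini-2.5-pro-exp-03-25',
        'Summarise what changed between the two versions.',
        systemPrompt,
        versions
    );
    console.log(result.response);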