krrishk22 committed
Commit 1ff0c61 · verified · 1 Parent(s): f2faa27

Update app.py

Files changed (1)
  1. app.py +23 -71
app.py CHANGED
@@ -224,83 +224,35 @@ def get_festivals_today(date: str = None) -> str:
 
 final_answer = FinalAnswerTool()
 
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
-with open("prompts.yaml", 'r') as stream:
-    prompt_templates = yaml.safe_load(stream)
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
 # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-
-# Define Models
-primary_model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='sarvamai/sarvam-m',  # Primary model, possibly overloaded
 )
-
-backup_model = HfApiModel(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',  # Backup model
+model = HfApiModel(
+    max_tokens=2096,
+    temperature=0.5,
+    model_id='sarvamai/sarvam-m',# it is possible that this model may be overloaded
 )
 
-# Function to Build an Agent with Any Model
-def build_agent(model_to_use):
-    # Assuming the tools are defined elsewhere
-    tools = [
-        # final_answer, get_horoscope, get_date_panchang,
-        # get_holidays, get_panchang_field, get_festivals_today,
-        # get_current_time_in_timezone, my_custom_tool, image_generation_tool
-    ]
-    return CodeAgent(
-        model=model_to_use,
-        tools=tools,
-        max_steps=6,
-        verbosity_level=1,
-        grammar=None,
-        planning_interval=None,
-        name=None,
-        description=None,
-        prompt_templates=prompt_templates
-    )
-
-# Instantiate Primary Agent
-primary_agent = build_agent(primary_model)
-
-# Fallback-Handled Runner Function
-def agent_runner(user_input):
-    """
-    This function takes user input, tries the primary agent,
-    and switches to the backup agent on failure.
-    """
-    try:
-        print("Attempting to run with the primary model...")
-        result = primary_agent.run(user_input)
-        if result is None or (isinstance(result, str) and result.strip() == ""):
-            raise ValueError("Primary model returned an empty or null response.")
-        return result
-    except Exception as e:
-        print(f"Primary model failed with error: {e}")
-        print("Switching to the backup model...")
-        try:
-            backup_agent = build_agent(backup_model)
-            result = backup_agent.run(user_input)
-            if result is None or (isinstance(result, str) and result.strip() == ""):
-                return "Backup model also returned an empty response."
-            return result
-        except Exception as e2:
-            print(f"Backup model also failed with error: {e2}")
-            return f"The backup model failed to generate a response: {e2}"
-
-# Launch Gradio with the runner function
-# We use the standard gr.Interface for this logic.
-iface = gr.Interface(
-    fn=agent_runner,
-    inputs="text",
-    outputs="text",
-    title="Agent with Fallback Logic",
-    description="Enter your query. The system will use a primary model and switch to a backup if the primary fails."
+
+# Import tool from Hub
+image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
+with open("prompts.yaml", 'r') as stream:
+    prompt_templates = yaml.safe_load(stream)
+
+agent = CodeAgent(
+    model=model,
+    tools=[final_answer], ## add your tools here (don't remove final answer)
+    max_steps=6,
+    verbosity_level=1,
+    grammar=None,
+    planning_interval=None,
+    name=None,
+    description=None,
+    prompt_templates=prompt_templates
 )
 
-iface.launch()
+
+GradioUI(agent).launch()
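
For reference, below is a minimal sketch of how the agent setup in app.py reads after this commit, with indentation restored and the surrounding imports spelled out. The import block is an assumption based on the standard agents-course First_agent_template (CodeAgent, HfApiModel and load_tool from smolagents; FinalAnswerTool and GradioUI from the template's own modules); none of it appears in this diff, and the dangling `)` that the diff context leaves just above the model definition is omitted here.

```python
# Sketch of the post-commit agent setup in app.py.
# The imports below are assumptions taken from the agents-course template,
# not part of this diff.
import yaml

from smolagents import CodeAgent, HfApiModel, load_tool
from tools.final_answer import FinalAnswerTool  # assumed template module
from Gradio_UI import GradioUI                  # assumed template Gradio wrapper

final_answer = FinalAnswerTool()

# Single model; the primary/backup fallback pair from the previous version is gone.
# If sarvamai/sarvam-m is overloaded, the Hugging Face endpoint URL from the
# comment in the diff can be used as model_id instead.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='sarvamai/sarvam-m',
)

# Import tool from the Hub (loaded but not yet passed to the agent's tools).
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[final_answer],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()
```

Compared with the removed version, the commit trades the primary/backup fallback runner and its gr.Interface front end for the template's single-model GradioUI flow, and it keeps image_generation_tool loaded without adding it to the agent's tools list.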