import os
import gradio as gr
import requests
import inspect
import pandas as pd
#import smolagents #to test
from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
from huggingface_hub import InferenceClient
import json
from final_answer import FinalAnswerTool
api_url = "https://agents-course-unit4-scoring.hf.space"
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
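
# Hedged sketch (not part of the original file): load_questions_from_file() below expects a
# cached questions.json, and its FileNotFoundError message says to run the API fetch first.
# A helper along these lines could create that cache from questions_url; the helper's name
# and the assumption that the endpoint returns a JSON list of question objects are mine.
def fetch_and_cache_questions(filepath="questions.json"):
    response = requests.get(questions_url, timeout=15)
    response.raise_for_status()
    questions = response.json()
    with open(filepath, "w", encoding="utf-8") as f:
        json.dump(questions, f, ensure_ascii=False, indent=2)
    print(f"Fetched and cached {len(questions)} questions to {filepath}.")
    return questions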
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer
def load_questions_from_file(filepath="questions.json"):
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            questions_data = json.load(f)
        if not questions_data:
            print("Loaded file is empty.")
            return "Loaded file is empty.", None
        print(f"Loaded {len(questions_data)} questions from file.")
        return "Loaded questions successfully.", questions_data
    except FileNotFoundError:
        print("File not found. Please run the API fetch first.")
        return "File not found.", None
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return f"Error decoding JSON: {e}", None
    except Exception as e:
        print(f"Unexpected error: {e}")
        return f"Unexpected error: {e}", None
# Set-up
# Token
# Model
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',  # it is possible that this model may be overloaded
    custom_role_conversions=None,
)
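
# Hedged note (assumption, not in the original): newer smolagents releases rename HfApiModel
# to InferenceClientModel (already imported above), so if the Qwen endpoint is overloaded a
# drop-in alternative could look like the commented line below.
# model = InferenceClientModel(model_id='Qwen/Qwen2.5-Coder-32B-Instruct', max_tokens=2096, temperature=0.5)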
#final_answer = FinalAnswerTool()

# Agent
agent_codeagent = CodeAgent(
    model=model,
    tools=[],  # tools=[final_answer],  # add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    # prompt_templates=prompt_templates
)
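
# Hedged sketch (assumption, not the original configuration): the imports above bring in
# DuckDuckGoSearchTool and FinalAnswerTool, so a tool-equipped variant of the agent could be
# wired up roughly as follows (kept commented out so the extra dependencies stay optional).
# agent_with_tools = CodeAgent(
#     model=model,
#     tools=[DuckDuckGoSearchTool(), FinalAnswerTool()],
#     max_steps=6,
#     verbosity_level=1,
# )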
#
def run_and_submit_one():
    # 1. Instantiate the agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
        #agent = agent_codeagent
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None

    # 2. Fetch questions by loading them from the local JSON file
    status_message, questions_data = load_questions_from_file()
    if questions_data is not None and len(questions_data) > 0:
        first_question = questions_data[0]
        print("First question object:", first_question)
        # To test
        question_text = first_question.get("question")
        task_id = first_question.get("task_id")
        print(f"\nTask ID: {task_id}")
        print(f"Question: {question_text}")
    else:
        # Return early so question_text / task_id are never used while undefined
        print("No data found.")
        return status_message, None

    # 3. Run the agent on the first question
    results_log = []
    answers_payload = []
    try:
        submitted_answer = agent(question_text)
        answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
    except Exception as e:
        print(f"Error running agent on task {task_id}: {e}")
        results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    return "Ran agent on the first question.", pd.DataFrame(results_log)
run_and_submit_one()