File size: 4,835 Bytes
10e9b7d
 
eccf8e4
7d65c66
3c4371f
3fd800d
6349023
c2a782d
aba723a
8b6c38c
aba723a
 
 
 
 
3898396
 
 
 
 
 
 
 
 
aba723a
2d114c5
aba723a
2d114c5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
aba723a
2d114c5
 
 
 
8d12972
 
 
 
 
 
aba723a
9695569
3898396
2d114c5
9491d2f
8d12972
9695569
8d12972
 
 
 
 
 
 
 
3898396
2d114c5
82fec12
 
 
 
 
aba723a
82fec12
 
 
2d114c5
82fec12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3a87a05
335a818
aba723a
335a818
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
import os
import gradio as gr
import requests
import inspect
import pandas as pd
#import smolagents  #to test
from smolagents import CodeAgent, InferenceClientModel, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
from huggingface_hub import InferenceClient
import json
from final_answer import FinalAnswerTool

# Base URL of the agents-course Unit 4 scoring service (a Hugging Face Space).
api_url = "https://agents-course-unit4-scoring.hf.space"
# GET endpoint that serves the evaluation questions.
questions_url = f"{api_url}/questions"
# POST endpoint for submitting answers; not referenced in the visible code — TODO confirm it is used elsewhere.
submit_url = f"{api_url}/submit"

class BasicAgent:
    """Minimal placeholder agent that replies with the same fixed string.

    Useful as a stand-in while wiring up the submission pipeline: it logs
    the incoming question and always returns a canned answer.
    """

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        """Log a preview of *question* and return the canned answer."""
        preview = question[:50]
        print(f"Agent received question (first 50 chars): {preview}...")
        canned = "This is a default answer."
        print(f"Agent returning fixed answer: {canned}")
        return canned

def load_questions_from_file(filepath="questions.json"):
    """Load cached evaluation questions from a local JSON file.

    Returns a ``(status_message, data)`` pair.  ``data`` is the decoded
    JSON payload on success and ``None`` on any failure (missing file,
    bad JSON, empty content, or an unexpected error).
    """
    try:
        with open(filepath, "r", encoding="utf-8") as fh:
            payload = json.load(fh)
            if not payload:
                # File parsed fine but holds an empty collection.
                print("Loaded file is empty.")
                return "Loaded file is empty.", None
            print(f"Loaded {len(payload)} questions from file.")
            return "Loaded questions successfully.", payload
    except FileNotFoundError:
        print("File not found. Please run the API fetch first.")
        return "File not found.", None
    except json.JSONDecodeError as err:
        print(f"Error decoding JSON: {err}")
        return f"Error decoding JSON: {err}", None
    except Exception as err:
        # Boundary catch-all: report the problem instead of crashing the UI.
        print(f"Unexpected error: {err}")
        return f"Unexpected error: {err}", None

# --- Setup ---

# Model backend: hosted inference endpoint for a code-oriented Qwen model.
# NOTE(review): this shared endpoint may be overloaded at times.
model = HfApiModel(
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    max_tokens=2096,
    temperature=0.5,
    custom_role_conversions=None,
)

# Tool the agent uses to report its final answer; CodeAgent requires one.
final_answer = FinalAnswerTool()

#Agent
# Code-writing agent driven by `model`; gives up after max_steps reasoning steps.
agent_codeagent = CodeAgent(
    model=model,
    tools=[final_answer], ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None
    #prompt_templates=prompt_templates
)


# #def run_and_submit_one():
#     # 1. Instantiate Agent ( modify this part to create your agent)
#     try:
#         #agent = BasicAgent()
#         agent = agent_codeagent
        
#     except Exception as e:
#         print(f"Error instantiating agent: {e}")
#         return f"Error initializing agent: {e}", None
    
#    # 2. Fetch Questions by loading from local json
#     status_message, questions_data = load_questions_from_file()

#     if questions_data is not None and len(questions_data) > 0:
#         first_question = questions_data[0]
#         print("First question object:", first_question)

#         #To test
#         question_text = first_question.get("question")
#         task_id = first_question.get("task_id")
#         print(f"\nTask ID: {task_id}")
#         print(f"Question: {question_text}")
#     else:
#         print("No data found.")

#     # 3. Run your Agent
#     results_log = []
#     answers_payload = []

#     try:
#         submitted_answer = agent(question_text)
#         answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
#         results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
#     except Exception as e:
#         print(f"Error running agent on task {task_id}: {e}")
#         results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

#     if not answers_payload:
#         print("Agent did not produce any answers to submit.")
#         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

#run_and_submit_one()       

# Gradio handler that runs the agent
def run_once(state):
    """Run the agent on the first cached question, at most once per session.

    Parameters:
        state: Gradio session state; a non-None value means the agent
            already ran and we refuse to run again.

    Returns:
        (output_text, new_state) tuple. ``new_state`` stays ``None`` on
        failure so the user can retry after fixing the problem.
    """
    if state is not None:
        return "Already run once. Refresh to rerun.", state

    status_message, questions_data = load_questions_from_file()
    if questions_data is None or len(questions_data) == 0:
        # Surface the loader's specific diagnostic instead of a generic message.
        return f"No questions found or failed to load. ({status_message})", None

    question = questions_data[0]
    # .get avoids an unhandled KeyError escaping to Gradio if the cached
    # JSON entry is malformed.
    question_text = question.get("question")
    task_id = question.get("task_id")
    if question_text is None or task_id is None:
        return "First question entry is missing 'question' or 'task_id'.", None

    try:
        answer = agent_codeagent(question_text)
        output = f"Answer to task {task_id}:\n{answer}"
        return output, output
    except Exception as e:
        # Boundary handler: report the failure in the UI rather than crash.
        return f"Error running agent: {e}", None

# Create Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Run AI Agent Once")

    # Output area for whatever run_once returns.
    output_text = gr.Textbox(label="Agent Output", lines=10)
    run_button = gr.Button("Run Agent")
    # Per-session cache: once set, run_once refuses to run again.
    state = gr.State()

    run_button.click(fn=run_once, inputs=state, outputs=[output_text, state])

# Launch only when executed as a script so that importing this module
# (e.g. in tests, or by a runner that serves `demo` itself) does not
# start a server as a side effect.
if __name__ == "__main__":
    demo.launch()