UK1\CuongN
committed on
Commit
·
2a353e8
1
Parent(s):
81917a3
init
Browse files- agent.py +87 -0
- app.py +15 -14
- requirements.txt +6 -1
agent.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from llama_index.llms.openai import OpenAI
|
2 |
+
from llama_index.tools.wikipedia.base import WikipediaToolSpec
|
3 |
+
from llama_index.core.llms import ChatMessage
|
4 |
+
from llama_index.core.agent import ReActAgent
|
5 |
+
import logging
|
6 |
+
from llama_index.llms.deepinfra import DeepInfraLLM
|
7 |
+
import os
|
8 |
+
from llama_index.tools.tavily_research import TavilyToolSpec
|
9 |
+
from llama_index.core.prompts import PromptTemplate
|
10 |
+
import requests
|
11 |
+
import json
|
12 |
+
|
13 |
+
class CuongBasicAgent:
    """
    Agent using LlamaIndex to fetch data from the web and answer GAIA benchmark questions.

    Wraps a ReAct agent backed by an OpenAI chat model with Wikipedia and
    Tavily web-search tools, and a custom system prompt that forces concise,
    format-faithful answers (as required by the GAIA scoring service).
    """

    def __init__(self):
        """Build the ReAct agent: LLM, tools, and the overridden system prompt.

        Environment variables:
            AGENT_MODEL: optional OpenAI model name (defaults to "gpt-4.1").
            TAVILY_API_KEY: API key for the Tavily search tool.
            OPENAI_API_KEY: read implicitly by the OpenAI LLM client.
        """
        # Custom ReAct system prompt. {tool_desc} / {tool_names} are template
        # variables filled in by LlamaIndex at runtime; literal JSON braces in
        # the examples are escaped as {{ }}.
        system_prompt = """
Value: You are an advanced assistant designed to help with a variety of tasks, including answering questions, providing summaries, and performing other types of analyses.

## Tools

You have access to a wide variety of tools. You are responsible for using the tools in any sequence you deem appropriate to complete the task at hand.
This may require breaking the task into subtasks and using different tools to complete each subtask.

You have access to the following tools:
{tool_desc}


## Output Format

Please answer in the same language as the question and use the following format:

```
Thought: The current language of the user is: (user's language). I need to use a tool to help me answer the question.
Action: tool name (one of {tool_names}) if using a tool.
Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
```

Please ALWAYS start with a Thought.

NEVER surround your response with markdown code markers. You may use code markers within your response if you need to.

Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.

If this format is used, the tool will respond in the following format:

```
Observation: tool response
```

You should keep repeating the above format till you have enough information to answer the question without using any more tools. At that point, you MUST respond in one of the following two formats:

```
Thought: I can answer without using any more tools. I'll use the user's language to answer
Answer: [your answer here (In the same language as the user's question)]
```

```
Thought: I cannot answer the question with the provided tools.
Answer: [your answer here (In the same language as the user's question)]
```

The answer should be concise and to the point. For example, if the answer is a number, just return the number without any additional text. If the question is "What is the capital of France?", the answer should be "Paris".

If the question includes guidelines regarding the format of the answer, please follow those guidelines faithfully
## Current Conversation

Below is the current conversation consisting of interleaving human and assistant messages.
"""
        react_system_prompt = PromptTemplate(system_prompt)

        # Model name is overridable via AGENT_MODEL; default preserves the
        # original hard-coded "gpt-4.1" behavior.
        llm = OpenAI(model=os.getenv("AGENT_MODEL", "gpt-4.1"))

        agent = ReActAgent.from_tools(
            llm=llm,
            tools=WikipediaToolSpec().to_tool_list()
            + TavilyToolSpec(api_key=os.getenv("TAVILY_API_KEY")).to_tool_list(),
            verbose=True,
        )

        # Replace the default ReAct system prompt with the custom one above.
        # "agent_worker:system_prompt" is the prompt key exposed by the
        # ReActAgent's underlying agent worker.
        agent.update_prompts({"agent_worker:system_prompt": react_system_prompt})
        self.agent = agent

    def __call__(self, question: str) -> str:
        """Run the agent on a single question and return its answer as a string.

        Args:
            question: The GAIA question text (possibly with an appended file body).

        Returns:
            The agent's final answer, stringified.
        """
        answer = self.agent.query(question)
        return str(answer)
|
app.py
CHANGED
@@ -3,22 +3,12 @@ import gradio as gr
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
|
|
6 |
|
7 |
# (Keep Constants as is)
|
8 |
# --- Constants ---
|
9 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
10 |
|
11 |
-
# --- Basic Agent Definition ---
|
12 |
-
# ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
|
13 |
-
class BasicAgent:
|
14 |
-
def __init__(self):
|
15 |
-
print("BasicAgent initialized.")
|
16 |
-
def __call__(self, question: str) -> str:
|
17 |
-
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
18 |
-
fixed_answer = "This is a default answer."
|
19 |
-
print(f"Agent returning fixed answer: {fixed_answer}")
|
20 |
-
return fixed_answer
|
21 |
-
|
22 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
23 |
"""
|
24 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
@@ -30,6 +20,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
30 |
if profile:
|
31 |
username= f"{profile.username}"
|
32 |
print(f"User logged in: {username}")
|
|
|
|
|
33 |
else:
|
34 |
print("User not logged in.")
|
35 |
return "Please Login to Hugging Face with the button.", None
|
@@ -37,10 +29,11 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
37 |
api_url = DEFAULT_API_URL
|
38 |
questions_url = f"{api_url}/questions"
|
39 |
submit_url = f"{api_url}/submit"
|
|
|
40 |
|
41 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
42 |
try:
|
43 |
-
agent =
|
44 |
except Exception as e:
|
45 |
print(f"Error instantiating agent: {e}")
|
46 |
return f"Error initializing agent: {e}", None
|
@@ -79,8 +72,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
79 |
if not task_id or question_text is None:
|
80 |
print(f"Skipping item with missing task_id or question: {item}")
|
81 |
continue
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
82 |
try:
|
83 |
-
submitted_answer = agent(
|
84 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
85 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
86 |
except Exception as e:
|
@@ -99,7 +100,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
99 |
# 5. Submit
|
100 |
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
|
101 |
try:
|
102 |
-
response = requests.post(submit_url, json=submission_data, timeout=
|
103 |
response.raise_for_status()
|
104 |
result_data = response.json()
|
105 |
final_status = (
|
|
|
3 |
import requests
|
4 |
import inspect
|
5 |
import pandas as pd
|
6 |
+
from agent import CuongBasicAgent
|
7 |
|
8 |
# (Keep Constants as is)
|
9 |
# --- Constants ---
|
10 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
11 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
13 |
"""
|
14 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
|
|
20 |
if profile:
|
21 |
username= f"{profile.username}"
|
22 |
print(f"User logged in: {username}")
|
23 |
+
if username != os.getenv("whoami"):
|
24 |
+
return f"You are not {os.getenv('whoami')}", None
|
25 |
else:
|
26 |
print("User not logged in.")
|
27 |
return "Please Login to Hugging Face with the button.", None
|
|
|
29 |
api_url = DEFAULT_API_URL
|
30 |
questions_url = f"{api_url}/questions"
|
31 |
submit_url = f"{api_url}/submit"
|
32 |
+
files_url = f"{api_url}/files/"
|
33 |
|
34 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
35 |
try:
|
36 |
+
agent = CuongBasicAgent()
|
37 |
except Exception as e:
|
38 |
print(f"Error instantiating agent: {e}")
|
39 |
return f"Error initializing agent: {e}", None
|
|
|
72 |
if not task_id or question_text is None:
|
73 |
print(f"Skipping item with missing task_id or question: {item}")
|
74 |
continue
|
75 |
+
|
76 |
+
# Manage questions with files
|
77 |
+
if item.get("file_name") and os.path.splitext(item.get("file_name"))[1] in ['.py', '.txt', '.json']:
|
78 |
+
file = requests.get(files_url+task_id, timeout=15).text
|
79 |
+
complete_question_text = f'{question_text}\nThis is the accompanying file:\n{file}'
|
80 |
+
else:
|
81 |
+
complete_question_text = question_text
|
82 |
+
|
83 |
try:
|
84 |
+
submitted_answer = agent(complete_question_text)
|
85 |
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
|
86 |
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
|
87 |
except Exception as e:
|
|
|
100 |
# 5. Submit
|
101 |
print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
|
102 |
try:
|
103 |
+
response = requests.post(submit_url, json=submission_data, timeout=180)
|
104 |
response.raise_for_status()
|
105 |
result_data = response.json()
|
106 |
final_status = (
|
requirements.txt
CHANGED
@@ -1,2 +1,7 @@
|
|
1 |
gradio
|
2 |
-
requests
|
|
|
|
|
|
|
|
|
|
|
|
1 |
gradio
|
2 |
+
requests
|
3 |
+
pandas
|
4 |
+
llama-index-llms-openai
|
5 |
+
llama-index-tools-wikipedia
|
6 |
+
llama-index-llms-deepinfra
|
7 |
+
llama-index-tools-tavily_research
|