import argparse
import json

from agent import BasicAgent
from smolagents import LiteLLMModel
from tools.utils import download_file

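
# Command-line driver: run BasicAgent over the questions in questions.json
# (optionally filtered by --id) and print the collected answers per task_id.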
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--id', required=False, action="append", help="Task ID of a question to run (may be repeated)")
    parser.add_argument('--max', required=False, type=int, default=10, help="Maximum number of agent steps")
    args = parser.parse_args()

    answers = {}
    base_url = 'https://agents-course-unit4-scoring.hf.space'
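
    # Load the question set; if --id was given, keep only those task IDs.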
    with open('questions.json', 'r') as s:
        questions = json.load(s)
    if args.id:
        questions = [q for q in questions if q.get('task_id') in args.id]
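
    # The model only needs to be created once and can be reused for every question.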
    model = LiteLLMModel(
        model_id="ollama/qwen2.5:7b",
        api_base="http://localhost:11434"
    )

    for question in questions:
        print(f"Processing question: {question.get('question')[:50]}")
        file_name = question.get('file_name')
        prompt = question.get('question')
        task_id = question.get('task_id')
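
        # Download the file attached to this task, if the question references one.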
        if file_name:
            file_path = download_file(f'{base_url}/files/{task_id}', file_name)
        else:
            file_path = None

        agent = BasicAgent(model, args.max)
        response = agent(prompt, file_path)
        answers[task_id] = response
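
    # Show everything that was answered, keyed by task_id.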
    print(answers)