import os
import gradio as gr
import requests
import pandas as pd
from transformers import pipeline
from typing import Optional
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Smart Agent Definition ---
class BasicAgent:
    def __init__(self):
        print("Loading advanced model pipeline...")
        # Swap in another model if you want (e.g. mistralai/Mistral-7B-Instruct-v0.2
        # via the HF Inference API; see the sketch below this class).
        self.generator = pipeline("text2text-generation", model="google/flan-t5-large")

    def __call__(self, question: str) -> str:
        try:
            prompt = f"Answer the following question clearly and concisely:\n{question.strip()}"
            # do_sample=False already means greedy decoding; passing temperature=0.0
            # alongside it only triggers a transformers warning, so it is omitted.
            response = self.generator(prompt, max_new_tokens=128, do_sample=False)
            answer = response[0]["generated_text"].strip()
            return answer
        except Exception as e:
            print(f"Agent failed to answer question: {e}")
            return "ERROR"
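# Alternative agent (a hedged sketch, not part of the original template): instead
# of running flan-t5-large locally, you could call a hosted model through the HF
# Inference API, as the comment in BasicAgent.__init__ hints. The model id and
# the HF_TOKEN handling below are assumptions; adjust them to your setup.
from huggingface_hub import InferenceClient

class InferenceAPIAgent:
    """Drop-in alternative to BasicAgent that queries a remote model."""
    def __init__(self, model_id: str = "mistralai/Mistral-7B-Instruct-v0.2"):
        # HF_TOKEN is optional for public models but required for gated ones.
        self.client = InferenceClient(model=model_id, token=os.getenv("HF_TOKEN"))

    def __call__(self, question: str) -> str:
        try:
            prompt = f"Answer the following question clearly and concisely:\n{question.strip()}"
            return self.client.text_generation(prompt, max_new_tokens=128).strip()
        except Exception as e:
            print(f"Inference API agent failed: {e}")
            return "ERROR"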
# --- Submission Logic ---
def run_and_submit_all(profile: Optional[gr.OAuthProfile]):
    space_id = os.getenv("SPACE_ID")
    if not profile:
        print("User not logged in.")
        return "Please login to Hugging Face with the button.", None
    username = profile.username.strip()
    print(f"User logged in: {username}")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code link: {agent_code}")
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"
    try:
        agent = BasicAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty.", None
    except Exception as e:
        return f"Error fetching questions: {e}", None
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or not question_text:
            continue
        try:
            answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": answer})
        except Exception as e:
            error_msg = f"AGENT ERROR: {e}"
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": error_msg})
    if not answers_payload:
        return "No answers generated for submission.", pd.DataFrame(results_log)
    submission_data = {
        "username": username,
        "agent_code": agent_code,
        "answers": answers_payload
    }
    print(f"Submitting {len(answers_payload)} answers...")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"✅ Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')})\n"
            f"Message: {result_data.get('message', 'No message')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"❌ Submission failed: {e}", pd.DataFrame(results_log)
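# For reference, the payload shapes this function relies on (inferred from the
# field accesses above, not from separate API documentation):
#   GET  {api_url}/questions  -> [{"task_id": str, "question": str, ...}, ...]
#   POST {api_url}/submit     <- {"username": str, "agent_code": str,
#                                 "answers": [{"task_id": str, "submitted_answer": str}, ...]}
#   submit response JSON      -> keys: username, score, correct_count,
#                                 total_attempted, message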
# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**
        1. Clone this space and implement your agent logic.
        2. Log in with your Hugging Face account using the button below.
        3. Click **Run Evaluation & Submit All Answers** to test and submit your agent.
        ---
        ⚠️ Note: The first run may take time depending on model and question count.
        """
    )
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
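# Note on wiring: run_and_submit_all takes a gr.OAuthProfile parameter, yet the
# click handler above passes no inputs. This relies on Gradio's OAuth support:
# when a function parameter is type-annotated with gr.OAuthProfile, Gradio
# injects the logged-in user's profile automatically (or None if not logged
# in), which is why gr.LoginButton() alone is enough to make login work.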
# --- Run App ---
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")
    if space_host_startup:
        print(f"✅ SPACE_HOST: {space_host_startup}")
        print(f"Runtime URL: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST not set.")
    if space_id_startup:
        print(f"✅ SPACE_ID: {space_id_startup}")
        print(f"Repo: https://huggingface.co/spaces/{space_id_startup}")
    else:
        print("ℹ️ SPACE_ID not set.")
    print("-" * 80)
    print("Launching Gradio App...")
    demo.launch(debug=True, share=False)
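# Local testing note (an assumption, not part of the original template): when run
# outside a Space, SPACE_HOST and SPACE_ID are unset, so the startup banner takes
# the "not set" branches and the app serves on the local URL Gradio prints:
#   python app.py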