import os
import requests
from huggingface_hub import login, hf_hub_url
from datasets import load_dataset
from PIL import Image
from io import BytesIO
import gradio as gr
from transformers import pipeline

# Authenticate using HF token
login(token=os.environ["HF_TOKEN"])

# Resolve a dataset-relative file path to its download URL on the Hub
def resolve_image_url(path):
    return hf_hub_url(repo_id="Jize1/GTA", filename=path, repo_type="dataset")
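
# For reference, hf_hub_url builds a URL of the form below; the example
# filename is hypothetical, real paths come from the dataset records:
#   resolve_image_url("image/demo.jpg")
#   -> "https://huggingface.co/datasets/Jize1/GTA/resolve/main/image/demo.jpg"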

# Download an image from the Hub, authenticating with the same HF token
def download_image(url):
    headers = {"Authorization": f"Bearer {os.environ['HF_TOKEN']}"}
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()  # fail fast instead of handing an error page to PIL
    image = Image.open(BytesIO(response.content)).convert("RGB")
    return image
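
# Minimal usage sketch (the path is hypothetical; see resolve_image_url above):
#   img = download_image(resolve_image_url("image/demo.jpg"))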

# Load GTA dataset
print("Loading GTA dataset...")
gta_data = load_dataset("Jize1/GTA", split="train", token=True)  # `use_auth_token` is deprecated; `token` is the current argument
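
# A hedged sketch of one record's shape, inferred from the fields used below:
#   {
#     "dialogs":   [{"role": ..., "content": ..., "tool_calls": [...]}, ...],
#     "gt_answer": {"whitelist": [...], ...},
#     "files":     [{"path": "image/....jpg", ...}, ...],
#   }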

# Load image captioning and OCR pipelines
print("Loading vision models...")
image_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
ocr_pipeline = pipeline("image-to-text", model="microsoft/trocr-base-printed")  # TrOCR swapped in: the original DiT image-classification checkpoint is not an OCR model
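
# Both pipelines run the "image-to-text" task and return a list of dicts
# shaped like [{"generated_text": "..."}], hence the [0]["generated_text"]
# indexing in the loop below.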

def evaluate_model(model_name):
    # NOTE: `model_name` is only echoed in the report; this loop simulates
    # tool execution and does not query the named model.
    total = 0
    inst_acc = 0
    tool_acc = 0
    summ_acc = 0

    for example in gta_data.select(range(10)):  # limit to 10 for demo
        dialogs = example["dialogs"]
        gt_answer = example["gt_answer"]

        user_query = dialogs[0]["content"]  # first user turn (unused in this simulation, kept for reference)
        files = example["files"]
        tool_calls = [d for d in dialogs if d.get("tool_calls")]

        image_path = files[0]["path"]
        image_url = resolve_image_url(image_path)
        image = download_image(image_url)

        # Simulated tool execution: dispatch on the ground-truth tool name.
        # Captioning and OCR run real pipelines; object counting stays a dummy,
        # and the assembled `result` string is illustrative only.
        result = ""
        for tool_call in tool_calls:
            tool = tool_call["tool_calls"][0]["function"]["name"]
            if tool == "ImageDescription":
                caption = image_captioner(image)[0]["generated_text"]
                result += f"[Caption] {caption}\n"
            elif tool == "OCR":
                ocr_text = ocr_pipeline(image)[0]["generated_text"]
                result += f"[OCR] {ocr_text}\n"
            elif tool == "CountGivenObject":
                result += "[Count] dummy count result\n"

        # Simulated scoring: every example counts as instruction-following,
        # tool accuracy only checks that some tool call exists, and
        # summarization checks for a non-empty answer whitelist.
        inst_acc += 1
        tool_acc += 1 if len(tool_calls) > 0 else 0
        summ_acc += 1 if gt_answer["whitelist"] else 0
        total += 1

    return {
        "InstAcc": round(inst_acc / total * 100, 2),
        "ToolAcc": round(tool_acc / total * 100, 2),
        "SummAcc": round(summ_acc / total * 100, 2)
    }
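
# In the GTA benchmark these names correspond to instruction-following
# (InstAcc), tool-selection (ToolAcc), and answer-summarization (SummAcc)
# accuracy; the numbers produced here are simulated placeholders, not a
# faithful reimplementation of those metrics.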


def run_evaluation(model_name):
    results = evaluate_model(model_name)
    return f"Results for {model_name}:\n" + "\n".join(f"{k}: {v}%" for k, v in results.items())
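
# Quick sanity check outside the UI (the model name is only a label here):
#   print(run_evaluation("Qwen/Qwen2.5-3B"))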

# Gradio UI
demo = gr.Interface(
    fn=run_evaluation,
    inputs=gr.Textbox(label="Hugging Face Model Name", placeholder="e.g. Qwen/Qwen2.5-3B"),
    outputs=gr.Textbox(label="GTA Evaluation Metrics"),
    title="GTA LLM Evaluation",
    description="Enter a model name from Hugging Face to simulate tool use and get GTA-style metrics.",
    allow_flagging="never"
)

demo.launch()