update (#2)
Commit 53214c0902c5bc6a66e7b9dd3ec470450acacc84
Co-authored-by: iruno <iruno@users.noreply.huggingface.co>
Note: this view is limited to 50 files because the commit contains too many changes; consult the raw diff for the full change set.
- .gitignore +6 -1
- BrowserGym/browsergym/assistantbench/src/browsergym/assistantbench/task.py +1 -1
- BrowserGym/browsergym/browsergym.egg-info/PKG-INFO +22 -0
- BrowserGym/browsergym/browsergym.egg-info/SOURCES.txt +6 -0
- BrowserGym/browsergym/browsergym.egg-info/dependency_links.txt +1 -0
- BrowserGym/browsergym/browsergym.egg-info/requires.txt +8 -0
- BrowserGym/browsergym/browsergym.egg-info/top_level.txt +1 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/__init__.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/chat.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/constants.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/env.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/observation.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/registration.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/spaces.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/__pycache__/task.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/__init__.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/base.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/functions.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/highlevel.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/parsers.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/utils.cpython-311.pyc +0 -0
- BrowserGym/browsergym/core/src/browsergym/core/env.py +4 -1
- BrowserGym/browsergym/core/src/browsergym/core/task.py +1 -1
- BrowserGym/browsergym/core/src/browsergym/utils/__pycache__/obs.cpython-311.pyc +0 -0
- BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/__init__.cpython-311.pyc +0 -0
- BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/agent.cpython-311.pyc +0 -0
- BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/loop.cpython-311.pyc +0 -0
- BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/utils.cpython-311.pyc +0 -0
- BrowserGym/browsergym/visualwebarena/src/browsergym/visualwebarena/task.py +1 -1
- BrowserGym/browsergym/webarena/src/browsergym/webarena/task.py +1 -1
- Dockerfile +8 -1
- agent/__init__.py +0 -0
- agent/checklist.py +18 -0
- agent/mini_bench/__init__.py +0 -0
- agent/mini_bench/__pycache__/__init__.cpython-311.pyc +0 -0
- agent/mini_bench/__pycache__/agent.cpython-311.pyc +0 -0
- agent/mini_bench/__pycache__/reward_agent.cpython-311.pyc +0 -0
- agent/mini_bench/agent.py +467 -0
- agent/mini_bench/checklist_eval.py +95 -0
- agent/mini_bench/eval_utils.py +309 -0
- agent/mini_bench/inference_utils.py +87 -0
- agent/mini_bench/prompts/__init__.py +1 -0
- agent/mini_bench/prompts/__pycache__/__init__.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/action.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/checklist_prompt.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/construct_messages.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/eval_type.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/image_utils.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/input_information.cpython-311.pyc +0 -0
- agent/mini_bench/prompts/__pycache__/judge_prompt.cpython-311.pyc +0 -0
.gitignore CHANGED
@@ -4,4 +4,9 @@
 *.gif
 *.bmp
 *.tiff
-*.ico
+*.ico
+*.log
+.gradio/
+__pycache__/
+.env
+.venv/
BrowserGym/browsergym/assistantbench/src/browsergym/assistantbench/task.py CHANGED
@@ -108,7 +108,7 @@ class AssistantBenchTask(AbstractBrowserTask):
 
     def setup(self, page: Page) -> Tuple[str, dict]:
         logger.info(f"Navigating to start url: {self.start_url}")
-        page.goto(self.start_url, timeout=
+        page.goto(self.start_url, timeout=50000)
         if self.save_predictions and self.output_file:
             # create an empty task entry in the output file (will raise an Exception if the entry is already there)
             add_prediction_to_jsonl(
BrowserGym/browsergym/browsergym.egg-info/PKG-INFO ADDED (+22 lines)
Metadata-Version: 2.4
Name: browsergym
Version: 0.13.4
Summary: BrowserGym: a gym environment for web task automation in the Chromium browser
Author: Rim Assouel, Léo Boisvert, Massimo Caccia, Alex Drouin, Maxime Gasse, Imene Kerboua, Alex Lacoste, Thibault Le Sellier De Chezelles, Tom Marty, Aman Jaiswal
License: Apache-2.0
Classifier: Development Status :: 3 - Alpha
Classifier: Programming Language :: Python :: 3
Classifier: Operating System :: OS Independent
Classifier: Intended Audience :: Science/Research
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: License :: OSI Approved :: Apache Software License
Requires-Python: >3.10
Description-Content-Type: text/markdown
Requires-Dist: browsergym-core==0.13.4
Requires-Dist: browsergym-miniwob==0.13.4
Requires-Dist: browsergym-webarena==0.13.4
Requires-Dist: browsergym-visualwebarena==0.13.4
Requires-Dist: browsergym-assistantbench==0.13.4
Requires-Dist: browsergym-experiments==0.13.4
Requires-Dist: browsergym-workarena>=0.4.1
Requires-Dist: weblinx-browsergym>=0.0.2
BrowserGym/browsergym/browsergym.egg-info/SOURCES.txt ADDED (+6 lines)
pyproject.toml
browsergym.egg-info/PKG-INFO
browsergym.egg-info/SOURCES.txt
browsergym.egg-info/dependency_links.txt
browsergym.egg-info/requires.txt
browsergym.egg-info/top_level.txt
BrowserGym/browsergym/browsergym.egg-info/dependency_links.txt ADDED (+1 line, blank)
BrowserGym/browsergym/browsergym.egg-info/requires.txt ADDED (+8 lines)
browsergym-core==0.13.4
browsergym-miniwob==0.13.4
browsergym-webarena==0.13.4
browsergym-visualwebarena==0.13.4
browsergym-assistantbench==0.13.4
browsergym-experiments==0.13.4
browsergym-workarena>=0.4.1
weblinx-browsergym>=0.0.2
BrowserGym/browsergym/browsergym.egg-info/top_level.txt ADDED (+1 line, blank)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 1.14 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/chat.cpython-311.pyc ADDED (binary file, 6.89 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/constants.cpython-311.pyc ADDED (binary file, 428 Bytes)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/env.cpython-311.pyc ADDED (binary file, 31.2 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/observation.cpython-311.pyc ADDED (binary file, 22.7 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/registration.cpython-311.pyc ADDED (binary file, 3.49 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/spaces.cpython-311.pyc ADDED (binary file, 8.42 kB)
BrowserGym/browsergym/core/src/browsergym/core/__pycache__/task.cpython-311.pyc ADDED (binary file, 5.53 kB)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 561 Bytes)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/base.cpython-311.pyc ADDED (binary file, 3.12 kB)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/functions.cpython-311.pyc ADDED (binary file, 26.2 kB)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/highlevel.cpython-311.pyc ADDED (binary file, 12.4 kB)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/parsers.cpython-311.pyc ADDED (binary file, 6.82 kB)
BrowserGym/browsergym/core/src/browsergym/core/action/__pycache__/utils.cpython-311.pyc ADDED (binary file, 12.2 kB)
BrowserGym/browsergym/core/src/browsergym/core/env.py CHANGED
@@ -27,6 +27,7 @@ from .observation import (
 )
 from .spaces import AnyBox, AnyDict, Float, Unicode
 from .task import AbstractBrowserTask
+from ..utils.obs import overlay_som, flatten_axtree_to_str
 
 logger = logging.getLogger(__name__)
 
@@ -602,6 +603,8 @@ document.addEventListener("visibilitychange", () => {
         _post_extract(self.page)
 
         # obs is generic to all tasks
+        screenshot_np_array = extract_screenshot(self.page)
+        som_screenshot_np_array = overlay_som(screenshot_np_array, extra_properties)
         obs = {
             "chat_messages": tuple(copy.deepcopy(self.chat.messages)),
             "goal": _try_to_extract_legacy_goal(self.goal_object),  # legacy goal, deprecated
@@ -612,7 +615,7 @@ document.addEventListener("visibilitychange", () => {
             "open_pages_titles": tuple(page.title() for page in self.context.pages),
             "active_page_index": np.asarray([self.context.pages.index(self.page)]),
             "url": self.page.url,  # redundant with "open_pages_urls" and "active_page_index"
-            "
+            "som_screenshot": som_screenshot_np_array,
             "dom_object": dom,
             "axtree_object": axtree,
             "extra_element_properties": extra_properties,
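Taken together, these hunks make every observation carry a set-of-marks (SoM) screenshot next to the raw DOM/AXTree data. A minimal consumer sketch, assuming the standard BrowserGym openended registration and that Pillow is installed; the start URL and output file name are illustrative:

import gymnasium as gym
from PIL import Image  # assumption: Pillow is available to save the image array

import browsergym.core  # noqa: F401 -- registers the "browsergym/openended" task

env = gym.make(
    "browsergym/openended",
    task_kwargs={"start_url": "https://example.com"},  # illustrative URL
)
obs, info = env.reset()

# "som_screenshot" is the key added by this commit: the raw screenshot with
# set-of-marks overlays drawn on top, as a numpy image array
Image.fromarray(obs["som_screenshot"]).save("som_screenshot.png")
env.close()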
BrowserGym/browsergym/core/src/browsergym/core/task.py CHANGED
@@ -92,7 +92,7 @@ class OpenEndedTask(AbstractBrowserTask):
         self.goal = goal
 
     def setup(self, page: playwright.sync_api.Page) -> tuple[str, dict]:
-        page.goto(self.start_url, timeout=
+        page.goto(self.start_url, timeout=50000)
         return self.goal, {}
 
     def teardown(self) -> None:
BrowserGym/browsergym/core/src/browsergym/utils/__pycache__/obs.cpython-311.pyc ADDED (binary file, 19.3 kB)
BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 446 Bytes)
BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/agent.cpython-311.pyc ADDED (binary file, 6.58 kB)
BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/loop.cpython-311.pyc ADDED (binary file, 55.2 kB)
BrowserGym/browsergym/experiments/src/browsergym/experiments/__pycache__/utils.cpython-311.pyc ADDED (binary file, 2.18 kB)
BrowserGym/browsergym/visualwebarena/src/browsergym/visualwebarena/task.py CHANGED
@@ -109,7 +109,7 @@ class GenericVisualWebArenaTask(AbstractBrowserTask):
         # task properties, will be used to set up the browsergym environment
         self.viewport = {"width": 1280, "height": 720}
         self.slow_mo = 1000  # ms
-        self.timeout =
+        self.timeout = 50000  # ms
 
         self.webarena_instance = VisualWebArenaInstance()
         self.config_file: str = None
BrowserGym/browsergym/webarena/src/browsergym/webarena/task.py CHANGED
@@ -34,7 +34,7 @@ class GenericWebArenaTask(AbstractBrowserTask):
         # task properties, will be used to set up the browsergym environment
         self.viewport = {"width": 1280, "height": 720}
         self.slow_mo = 1000  # ms
-        self.timeout =
+        self.timeout = 50000  # ms
 
         self.webarena_instance = WebArenaInstance()
         self.config_file: str = None
Dockerfile CHANGED
@@ -56,6 +56,11 @@ RUN curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | gpg --dearmor
 # Set up working directory
 WORKDIR /app
 
+COPY templates/ templates/
+COPY browser_agent.py .
+COPY process_run.py .
+COPY utils.py .
+
 # Copy requirements and install Python dependencies
 COPY requirements.txt .
 RUN pip install --no-cache-dir -r requirements.txt
@@ -94,6 +99,8 @@ ENV RESOLUTION_HEIGHT=1080
 # COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 
 # EXPOSE 7788 6080 5900
+EXPOSE 7860
+ENV GRADIO_SERVER_NAME="0.0.0.0"
 
 # CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
-
+CMD ["python", "app.py"]
agent/__init__.py ADDED (empty file)
agent/checklist.py ADDED (+18 lines)
from .mini_bench.agent import ChecklistGenerationAgent

def generate_checklist(**data):
    # data: 'intent', 'start_url', 'text_observation'
    agent_config = {
        'model_name': 'WPRM/qwen-3b-ar-reward-cot-mtl-checklist-enhanced',
        'base_url': 'http://165.132.144.84:7701/v1',
        'api_key': 'empty',
        'temperature': 0.7,
        'use_log_probs': True,
        'use_checklist': True,
        'use_multimodal': False,
        'num_generate': 1,
    }
    checklist_generation_agent = ChecklistGenerationAgent(agent_config)
    response_list, cost = checklist_generation_agent.generate_response(data, prompt_type='ours', constraint_str_list=["<think>", "</think>", "<answer>", "</answer>"])
    response = response_list[0]
    return response.split("<answer>")[-1].split("</answer>")[0].strip()
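A minimal call sketch for the helper above; the intent, URL, and observation strings are illustrative placeholders, and the call assumes the vLLM endpoint hard-coded in agent_config is reachable:

from agent.checklist import generate_checklist

# illustrative inputs: the task intent, the task's start URL, and the current
# text observation (e.g. a flattened accessibility tree)
checklist = generate_checklist(
    intent="Find the cheapest one-way flight from SFO to JFK",
    start_url="https://example.com",
    text_observation="[1] link 'Flights'\n[2] textbox 'From' ...",
)
print(checklist)  # the text extracted from between <answer> ... </answer>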
agent/mini_bench/__init__.py ADDED (empty file)
agent/mini_bench/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 186 Bytes)
agent/mini_bench/__pycache__/agent.cpython-311.pyc ADDED (binary file, 20.7 kB)
agent/mini_bench/__pycache__/reward_agent.cpython-311.pyc ADDED (binary file, 21.3 kB)
agent/mini_bench/agent.py ADDED (+467 lines)
from abc import ABC, abstractmethod
import time
import requests
import json
import math
from langsmith import Client
from langchain_openai import ChatOpenAI

from .prompts import get_messages
from .prompts.judge_prompt import (
    JUDGE_OURS_BT_MODELING_PROMPT_TEMPLATE,
    JUDGE_OURS_BT_MODELING_WO_CHECKLIST_PROMPT_TEMPLATE,
    JUDGE_OURS_BT_MODELING_MULTIMODAL_PROMPT_TEMPLATE,
    JUDGE_OURS_BT_MODELING_MULTIMODAL_WO_CHECKLIST_PROMPT_TEMPLATE
)
from .prompts.image_utils import image_to_base64_url

MAX_RETRY = 3
RETRY_SLEEP = 5
MODEL_COST_MAPPING = {
    "gpt-4o-mini": {
        "input_token_cost": 0.15,
        "output_token_cost": 0.6
    },
    "gpt-4o": {
        "input_token_cost": 2.5,
        "output_token_cost": 10
    },
}


class Agent(ABC):
    @abstractmethod
    def generate_response(self, inputs: dict) -> str:
        pass

class BaseAgent(Agent):
    def __init__(self, agent_config: dict):
        self.agent_config = agent_config
        self._setup()

    def _setup(self):
        use_log_probs = self.agent_config.get("use_log_probs", False)
        if use_log_probs:
            self.llm = ChatOpenAI(
                model=self.agent_config["model_name"],
                base_url=self.agent_config["base_url"],
                api_key=self.agent_config["api_key"],
                temperature=self.agent_config["temperature"],
                timeout=300,
                logprobs=True,
                top_logprobs=10
            )
        else:
            self.llm = ChatOpenAI(
                model=self.agent_config["model_name"],
                base_url=self.agent_config["base_url"],
                api_key=self.agent_config["api_key"],
                temperature=self.agent_config["temperature"],
                timeout=300
            )
        self.temperature = self.agent_config["temperature"]
        self.num_generate = self.agent_config["num_generate"]
        self.use_checklist = self.agent_config.get("use_checklist", False)
        self.use_multimodal = self.agent_config.get("use_multimodal", False)

        # setup cost
        model_cost = MODEL_COST_MAPPING.get(self.agent_config["model_name"], None)
        if model_cost and "api" in self.agent_config["base_url"]:
            self.input_token_cost = model_cost["input_token_cost"]
            self.output_token_cost = model_cost["output_token_cost"]
        else:
            self.input_token_cost = 0.0
            self.output_token_cost = 0.0

    def generate_with_retry(self, model_input, constraint_str_list: list = None):
        total_input_tokens = 0
        total_output_tokens = 0
        if self.temperature == 0:
            response = self.llm.invoke(model_input)
            total_input_tokens += response.response_metadata["token_usage"]["prompt_tokens"]
            total_output_tokens += response.response_metadata["token_usage"]["completion_tokens"]
        else:
            for i in range(MAX_RETRY):
                try:
                    response = self.llm.invoke(model_input)
                    total_input_tokens += response.response_metadata["token_usage"]["prompt_tokens"]
                    total_output_tokens += response.response_metadata["token_usage"]["completion_tokens"]
                    if constraint_str_list:
                        pass_constraint_num = 0
                        for constraint_str in constraint_str_list:
                            if constraint_str in response.content:
                                pass_constraint_num += 1
                        if pass_constraint_num == len(constraint_str_list):
                            break
                        else:
                            print(f"Agent has format issue, retry... {i+1}/{MAX_RETRY}")
                            print(response.content)
                    else:
                        break
                except Exception as e:
                    print(f"Agent returned an Error: {e}")
                    response = None
                    time.sleep(RETRY_SLEEP)

        cost = self.input_token_cost * total_input_tokens / 1000000 + self.output_token_cost * total_output_tokens / 1000000

        if response is None:
            return "", cost
        else:
            return response.content, cost

    def prepare_message(self, model_input: dict, prompt_type: str):
        message = []
        return message

    def generate_response(self, model_input: dict, prompt_type: str, constraint_str_list: list = None):
        total_cost = 0
        response_list = []
        # prepare message
        message = self.prepare_message(model_input, prompt_type)
        # print(message)

        # n sampling
        for i in range(self.num_generate):
            response, cost = self.generate_with_retry(message, constraint_str_list)
            response_list.append(response)
            total_cost += cost

        return response_list, total_cost


class GroundingJudgeAgent(BaseAgent):
    def __init__(self, agent_config: dict):
        super().__init__(agent_config)
        self._setup()

    def prepare_message(self, model_input: dict, prompt_type):
        message = get_messages(
            input_info=model_input,
            inference_mode="judge_grounding",
            prompt_type=prompt_type,
            use_multimodal=self.use_multimodal,
            text_obs=self.agent_config["text_obs_type"],
            image_obs=self.agent_config["image_obs_type"]
        )
        return message


class ProgressJudgeAgent(BaseAgent):
    def __init__(self, agent_config: dict):
        super().__init__(agent_config)
        self._setup()

    def prepare_message(self, model_input: dict, prompt_type):
        if self.agent_config["input_type"] == "text_only":
            use_multimodal = False
            text_obs = self.agent_config["text_obs_type"]
            image_obs = None
        elif self.agent_config["input_type"] == "image_only":
            use_multimodal = True
            text_obs = None
            image_obs = self.agent_config["image_obs_type"]
        elif self.agent_config["input_type"] == "text_image":
            use_multimodal = True
            text_obs = self.agent_config["text_obs_type"]
            image_obs = self.agent_config["image_obs_type"]
        else:
            raise ValueError(f"Invalid input type: {self.agent_config['input_type']}")

        if self.agent_config["use_in_progress"]:
            use_in_progress = True
        else:
            use_in_progress = False

        message = get_messages(
            input_info=model_input,
            inference_mode="judge_progress",
            prompt_type=prompt_type,
            use_checklist=self.use_checklist,
            use_multimodal=use_multimodal,
            text_obs=text_obs,
            image_obs=image_obs,
            use_in_progress=use_in_progress
        )
        return message

    def add_logprob(self, ori_logprob: float, add_logprob: float):
        if ori_logprob is None:
            return add_logprob
        else:
            ori_prob = math.exp(ori_logprob)
            add_prob = math.exp(add_logprob)
            return math.log(ori_prob + add_prob)

    def get_judge_probs(self, logprobs: list):
        # target_judge = {
        #     "yes": [" Yes", "Yes"],
        #     "no": [" No", "No"],
        #     "in": [" In", "In"]
        # }
        target_judge = {
            "yes": [
                " Yes", "ĠYes", "Yes", "ĊYes",
                "Ġyes", "yes", "Ċyes",
                "ĠYES", "YES", "ĊYES",
                "ĠDone", "Done", "ĊDone",
                "ĠCompleted", "Completed", "ĊCompleted",
                "ĠCorrect", "Correct", "ĊCorrect"
            ],
            "no": [
                " No", "ĠNo", "No", "ĊNo",
                "ĠNO", "NO", "ĊNO",
                "ĠNot", "Not", "ĊNot",
                "ĠNone", "None", "ĊNone",
                "ĠNope", "Nope", "ĊNope",
                "ĠUn", "Un", "ĊUn",
                "ĠWrong", "Wrong", "ĊWrong"
            ],
            "in": [
                " In", "ĠIn", "In", "ĊIn",
                "ĠPending", "Pending", "ĊPending",
                "ĠPart", "Part", "ĊPart",
                "ĠPartial", "Partial", "ĊPartial",
                "ĠInProgress", "InProgress", "ĊInProgress"
            ]
        }
        response_str = ""
        judge_probs_list = []
        # print(logprobs)
        for i, log_prob in enumerate(logprobs):
            # Start to find judge string
            if "<answer>" in response_str:
                find_judge_str = None
                for judge_type in target_judge:
                    if log_prob["token"] in target_judge[judge_type]:
                        # print(log_prob)
                        find_judge_str = judge_type
                        break
                if find_judge_str:
                    # print("find judge str")
                    token_judge_dict = {
                        "yes": None,
                        "no": None,
                        "in": None
                    }
                    if "top_logprobs" in log_prob:
                        for token_info in log_prob["top_logprobs"]:
                            for judge_type in target_judge:
                                for judge_str in target_judge[judge_type]:
                                    # if judge_str in token_info["token"] and token_info["logprob"] > token_judge_dict[judge_type]:
                                    #     token_judge_dict[judge_type] = token_info["logprob"]
                                    if judge_str in token_info["token"]:
                                        # print(token_info["logprob"])
                                        token_judge_dict[judge_type] = self.add_logprob(token_judge_dict[judge_type], token_info["logprob"])
                        # for None case
                        for judge_type in token_judge_dict:
                            if token_judge_dict[judge_type] is None:
                                token_judge_dict[judge_type] = float("-inf")
                        judge_probs_list.append(token_judge_dict)
                    else:
                        # for vllm bugs : no top_logprobs
                        for judge_type in token_judge_dict:
                            if judge_type == find_judge_str:
                                token_judge_dict[judge_type] = log_prob["logprob"]
                            else:
                                token_judge_dict[judge_type] = float("-inf")
                        judge_probs_list.append(token_judge_dict)
                    # print(token_judge_dict)

            if "</answer>" in response_str:
                break

            response_str += log_prob["token"]
            # print(response_str.replace("Ġ", " ").replace("Ċ", "\n"))
        # print(judge_probs_list)
        if len(judge_probs_list) == 0:
            return [{
                "yes": 0.0,
                "no": 0.0,
                "in": 0.0
            }]
        else:
            # convert with softmax
            final_judge_probs_list = []
            for judge_probs in judge_probs_list:
                exp_logprobs = [math.exp(x) for x in [judge_probs["yes"], judge_probs["no"], judge_probs["in"]]]
                sum_exp_logprobs = sum(exp_logprobs)
                softmax_probs = [x / sum_exp_logprobs for x in exp_logprobs]
                final_judge_probs_list.append({
                    "yes": softmax_probs[0],
                    "no": softmax_probs[1],
                    "in": softmax_probs[2]
                })
            return final_judge_probs_list

    def generate_probs(self, model_input: dict, prompt_type: str):
        total_cost = 0
        response_list = []
        # prepare message
        message = self.prepare_message(model_input, prompt_type)
        # print(message)

        for i in range(self.num_generate):
            try:
                response = self.llm.invoke(message)
                total_input_tokens = response.response_metadata["token_usage"]["prompt_tokens"]
                total_output_tokens = response.response_metadata["token_usage"]["completion_tokens"]
                total_cost = self.input_token_cost * total_input_tokens / 1000000 + self.output_token_cost * total_output_tokens / 1000000
                logprobs = response.response_metadata["logprobs"]["content"]
                response_list.append(
                    {
                        "response": response.content,
                        "judge_probs": self.get_judge_probs(logprobs)
                    }
                )
            except Exception as e:
                print(f"Error: {e}")
                # print(response.response_metadata["logprobs"])
                response_list.append(
                    {
                        "response": response.content,
                        "judge_probs": []
                    }
                )
        return response_list, total_cost


class ChecklistGenerationAgent(BaseAgent):
    def __init__(self, agent_config: dict):
        super().__init__(agent_config)
        self._setup()

    def prepare_message(self, model_input: dict, prompt_type):
        message = get_messages(
            input_info=model_input,
            inference_mode="checklist_generation",
            prompt_type=prompt_type
        )
        return message


class ClassifierRewardAgent(Agent):
    def __init__(self, url: str, use_checklist: bool = False, use_multimodal: bool = False):
        self.url = url
        self.use_checklist = use_checklist
        self.use_multimodal = use_multimodal

    def _process_multimodal_message(self, prompt: str, image_list: list[str]):
        multimodal_message = []
        text_prompt_prefix = prompt.split("<IMAGE_PLACEHOLDER>")[0]
        text_prompt_suffix = prompt.split("<IMAGE_PLACEHOLDER>")[1]
        multimodal_message = [
            {"type": "text", "text": text_prompt_prefix},
            # {"type": "image_url", "image_url": {"url": image_to_base64_url(image_list[0])}},
            {"type": "image", "image": image_to_base64_url(image_list[0])},
            {"type": "text", "text": text_prompt_suffix}
        ]
        return multimodal_message

    def _make_query(self, user_prompt_template: dict, model_input: dict | list[dict]):
        if self.use_multimodal:
            tmp_user_prompt = user_prompt_template["user"].format(
                **model_input
            )
            user_prompt = self._process_multimodal_message(tmp_user_prompt, model_input["image_list"])
        else:
            user_prompt = user_prompt_template["user"].format(
                **model_input
            )
        assistant_prompt = user_prompt_template["assistant"].format(
            **model_input
        )
        query = [
            {"role": "user", "content": user_prompt},
            {"role": "assistant", "content": assistant_prompt}
        ]
        return query

    def prepare_message(self, model_input: dict | list[dict], batch: bool = False):
        if self.use_checklist:
            if self.use_multimodal:
                user_prompt_template = JUDGE_OURS_BT_MODELING_MULTIMODAL_PROMPT_TEMPLATE
            else:
                user_prompt_template = JUDGE_OURS_BT_MODELING_PROMPT_TEMPLATE
        else:
            if self.use_multimodal:
                user_prompt_template = JUDGE_OURS_BT_MODELING_MULTIMODAL_WO_CHECKLIST_PROMPT_TEMPLATE
            else:
                user_prompt_template = JUDGE_OURS_BT_MODELING_WO_CHECKLIST_PROMPT_TEMPLATE

        if self.use_multimodal:
            if batch:
                message = [self._make_query(user_prompt_template, input) for input in model_input]
            else:
                message = [self._make_query(user_prompt_template, model_input)]
        else:
            if batch:
                message = {
                    "query": [self._make_query(user_prompt_template, input) for input in model_input],
                    "prompts": []
                }
            else:
                message = {
                    "query": self._make_query(user_prompt_template, model_input),
                    "prompts": []
                }

        return message

    def get_rm_score(self, message: dict | list):
        headers = {"Content-Type": "application/json"}

        try:
            if self.use_multimodal:
                response = requests.post(
                    self.url,
                    json={"messages": message},
                    timeout=600
                )
            else:
                response = requests.post(
                    self.url,
                    headers=headers,
                    data=json.dumps(message),
                    timeout=300
                )
            response.raise_for_status()

            response_json = response.json()

            if "rewards" not in response_json:
                print(f"Error: 'rewards' key not found in API response: {response_json}")
                return []

            if "get_reward" in self.url:
                # use openrlhf
                return response_json["rewards"]
            elif "pooling" in self.url:
                # use vllm server
                return response_json["reward"]
            else:
                # error
                raise ValueError(f"Invalid URL: {self.url}")

        except requests.exceptions.Timeout:
            print(f"Error: Request timed out to {self.url}")
            return []
        except requests.exceptions.RequestException as e:
            print(f"Error during request to {self.url}: {e}")
            return []
        except json.JSONDecodeError:
            print(f"Error: Failed to decode JSON response from {self.url}")
            return []
        except KeyError as e:
            print(f"Error: Missing key {e} in response from {self.url}")
            return []

    def generate_response(self, model_input: dict | list[dict], batch: bool = False):
        if batch:
            message = self.prepare_message(model_input, batch=True)
        else:
            message = self.prepare_message(model_input)
        rewards = self.get_rm_score(message)

        return rewards, 0
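For intuition about ProgressJudgeAgent.get_judge_probs above: at each answer token it merges the logprobs of all "Yes"/"No"/"In Progress" token variants in probability space (add_logprob), then softmax-normalizes the three totals. A toy reproduction of that arithmetic with made-up logprobs:

import math

# made-up merged logprobs for one answer position
merged = {"yes": math.log(0.6), "no": math.log(0.3), "in": float("-inf")}

# the same softmax step used at the end of get_judge_probs
exps = [math.exp(merged[k]) for k in ("yes", "no", "in")]  # math.exp(-inf) == 0.0
probs = {k: e / sum(exps) for k, e in zip(("yes", "no", "in"), exps)}
print(probs)  # {'yes': 0.666..., 'no': 0.333..., 'in': 0.0}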
agent/mini_bench/checklist_eval.py ADDED (+95 lines)
import re

from langchain_openai import ChatOpenAI

from .agent import BaseAgent

SYSTEM_PROMPT = "You are an expert evaluator. Your task is to assess how well a Web Agent’s generated checklist aligns with the reference checklist for a given user instruction."

USER_PROMPT = """# Task Description
Use the provided task description, evaluation criteria, and both checklists to assign a score from 1 to 5. Justify your rating with a brief explanation that considers both content overlap and logical structure.

## Score Criteria
- 5: Checklist covers all subgoals, is correct and clearly expressed
- 4: Minor omissions or phrasing issues but mostly accurate and complete
- 3: Partially matches, but with noticeable gaps or errors
- 2: Incomplete or includes incorrect steps
- 1: Mostly irrelevant, incorrect, or missing the task goal

## User Instruction:
{intent}

## Reference Checklist:
{gt_checklist}

## Agent’s Generated Checklist:
{generated_checklist}

# Output Format
Your response should be in the following format:
REASON: [Write 2–4 sentences explaining how well the generated checklist matches the reference. Mention specific matches, omissions, errors, or strengths.]
SCORE: [1–5]
"""


class ChecklistEvalAgent(BaseAgent):
    def __init__(self, agent_config: dict):
        super().__init__(agent_config)
        self._setup()

    # prompt_type is unused here; the default lets generate_response below
    # call this method with a single argument
    def prepare_message(self, model_input: dict, prompt_type=None):
        message = [
            {
                "role": "system",
                "content": SYSTEM_PROMPT
            },
            {
                "role": "user",
                "content": USER_PROMPT.format(
                    intent=model_input["intent"],
                    gt_checklist=model_input["gt_checklist"],
                    generated_checklist=model_input["generated_checklist"]
                )
            }
        ]
        return message

    def generate_response(self, model_input: dict):
        total_cost = 0
        response_list = []
        # prepare message
        message = self.prepare_message(model_input)

        # n sampling
        for _ in range(self.num_generate):
            response, cost = self.generate_with_retry(message, ["SCORE"])
            response_list.append(response)
            total_cost += cost

        return response_list, total_cost

def parsing_score(response: str):
    score = response.split("SCORE:")[-1].split("\n")[0].strip()
    match = re.search(r'\d+', score)

    if match:
        return int(match.group())
    else:
        return None

def average_score(scores: list[int]):
    if len(scores) == 0:
        return 0
    return sum(scores) / len(scores)

def get_score(results: list[dict]):
    score_list = []
    for result in results:
        tmp_scores = [parsing_score(response) for response in result["response"]]
        scores = [score for score in tmp_scores if score is not None]
        result["score_list"] = scores
        final_score = average_score(scores)
        result["score"] = final_score
        score_list.append(result)

    return results, score_list
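A quick sketch of the parsing helpers above on a made-up judge response:

sample = "REASON: Covers all subgoals; minor phrasing issues.\nSCORE: 4"
print(parsing_score(sample))           # 4
print(parsing_score("no score here"))  # None -- the regex finds no digits
print(average_score([4, 5, 4]))        # 4.333...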
agent/mini_bench/eval_utils.py ADDED (+309 lines)
import re
import random
from collections import Counter

from .utils import load_json, save_json, create_html_report

random.seed(42)
def get_score(response_list: list, indicator: str) -> int:
    if len(response_list) == 0:
        return [-100]

    if isinstance(response_list[0], float):
        return response_list

    if indicator == "prob":
        score_list = []
        for response in response_list:
            total_score = 0
            for judge_probs in response:
                yes_prob = judge_probs.get("yes", 0)
                in_progress_prob = judge_probs.get("in", 0)
                total_score += yes_prob + in_progress_prob * 0.5
            if len(response) > 0:
                score_list.append(total_score / len(response))
            else:
                score_list.append(0)
        return score_list
    else:
        score_list = []
        for response in response_list:
            if indicator == "SCORE":
                if "SCORE" in response:
                    try:
                        score_str = response.split("SCORE:")[1].split("\n")[0].strip()
                    except:
                        score_str = response.split("SCORE:")[-1].strip()
                    # find first integer
                    try:
                        score = re.search(r'-?\d+', score_str).group()
                        score_list.append(int(score))
                    except:
                        score_list.append(0)
                else:
                    try:
                        score_str = response.split("<answer>")[1].split("</answer>")[0].strip()
                    except:
                        score_str = response.split("<answer>")[-1].split("</answer>")[0].strip()
                    # find "Yes" or "No"
                    if "Yes" in score_str:
                        score_list.append(1)
                    elif "In Progress" in score_str:
                        score_list.append(0.5)
                    elif "No" in score_str:
                        score_list.append(0)
                    else:
                        score_list.append(0)
            elif indicator == "JUDGE":
                try:
                    judge_str = response.split("JUDGE:")[1].split("\n")[0].strip()
                except:
                    judge_str = response.split("JUDGE:")[-1].strip()
                if "Yes" in judge_str:
                    score_list.append(1)
                elif "No" in judge_str:
                    score_list.append(0)
                else:
                    score_list.append(0)
            elif indicator == "CHECKLIST EVALUATION":
                if "<answer>" in response:
                    try:
                        checklist_str = response.split("<answer>")[1].split("</answer>")[0].strip()
                    except:
                        checklist_str = response.split("<answer>")[-1].split("</answer>")[0].strip()
                else:
                    checklist_str = response.split("CHECKLIST EVALUATION:")[-1].strip()

                count_yes = checklist_str.count("Yes")
                count_no = checklist_str.count("No")
                count_in_progress = checklist_str.count("In Progress")
                try:
                    total_score = (count_yes + count_in_progress*0.5) / (count_yes + count_no + count_in_progress)
                except:
                    total_score = 0
                score_list.append(total_score)
            else:
                raise ValueError(f"Invalid indicator: {indicator}")
        return score_list

def get_acc_and_mrr(chosen_score, rejected_scores):
    if len(rejected_scores) == 0:
        return 0, False

    same_score_num = rejected_scores.count(chosen_score)
    all_scores = rejected_scores + [chosen_score]
    sorted_scores = sorted(all_scores, reverse=True)
    rank = sorted_scores.index(chosen_score) + 1 + same_score_num  # draw penalty
    if all(chosen_score > r for r in rejected_scores):
        accuracy = True
    else:
        accuracy = False
    return 1 / rank, accuracy

def average_score(score_list: list[float]):
    if len(score_list) == 0:
        return -100
    return sum(score_list) / len(score_list)

def self_consistency_score(score_list: list[float]):
    if len(score_list) == 0:
        return -100
    counter = Counter(score_list)
    return max(counter.values()) / len(score_list)

def get_chosen_rejected_scores(data: dict, agg_func: str):
    if len(data["chosen"]) == 0:
        data["chosen"] = [{"score": [-100]}]
    if len(data["rejected"]) == 0:
        data["rejected"] = [{"score": [-100]}]
    if not isinstance(data["chosen"][0], dict):
        data["chosen"][0] = {"score": [-100]}  # replace a non-dict entry with a sentinel
    if not isinstance(data["rejected"][0], dict):
        data["rejected"][0] = {"score": [-100]}  # replace a non-dict entry with a sentinel

    if agg_func == "average":
        chosen_score = average_score(data["chosen"][0]["score"])
        rejected_scores = [average_score(rejected_score["score"]) for rejected_score in data["rejected"]]
    elif agg_func == "self_consistency":
        chosen_score = self_consistency_score(data["chosen"][0]["score"])
        rejected_scores = [self_consistency_score(rejected_score["score"]) for rejected_score in data["rejected"]]
    else:
        raise ValueError(f"Invalid agg_func: {agg_func}")
    return chosen_score, rejected_scores

def get_score_results(results, agg_func):
    score_dict = {"mrr": [], "accuracy": [], "traj_accuracy": []}
    task_accuracy = {}
    for result in results:
        chosen_score, rejected_scores = get_chosen_rejected_scores(result, agg_func)
        mrr, accuracy = get_acc_and_mrr(chosen_score, rejected_scores)
        score_dict["mrr"].append(mrr)
        score_dict["accuracy"].append(accuracy)
        if result["task_id"] not in task_accuracy:
            task_accuracy[result["task_id"]] = []
        task_accuracy[result["task_id"]].append(accuracy)

    for task_id in task_accuracy:
        if sum(task_accuracy[task_id]) == len(task_accuracy[task_id]):
            score_dict["traj_accuracy"].append(True)
        else:
            score_dict["traj_accuracy"].append(False)

    return score_dict

def calculate_stats(results, agg_func: str="average"):
    if len(results) == 0:
        return {
            "MRR": 0,
            "Accuracy": 0,
            "Traj_Accuracy": 0,
        }
    total_score = get_score_results(results, agg_func)
    stats = {
        "MRR": sum(total_score["mrr"]) / len(total_score["mrr"]),
        "Accuracy": sum(total_score["accuracy"]) / len(total_score["accuracy"]),
        "Traj_Accuracy": sum(total_score["traj_accuracy"]) / len(total_score["traj_accuracy"]),
    }

    return stats

def group_by_task(results, split_indicator: str):
    # sort results by task_id and step_id
    results.sort(key=lambda x: (x["task_id"], x["step_id"]))
    # group by task_name
    grouped_task_dict = {}
    for result in results:
        task_name = "task_" + str(result["task_id"]) + "_step_" + str(result["step_id"])
        if task_name not in grouped_task_dict:
            grouped_task_dict[task_name] = {
                "task_id": result["task_id"],
                "step_id": result["step_id"],
                "intent": result["intent"],
                "start_url": result["start_url"],
                "gt_checklist": result["gt_checklist"],
                "generated_checklist": result.get("generated_checklist", None),
                "trajectory": result["trajectory"],
                "current_url": result["current_url"],
                "text_observation": result["text_observation"],
                # "image_list": result["image_list"],
                "chosen": [],
                "rejected": [],
                "source_name": result["source_name"],
            }

        response = result["response"] if "response" in result else []
        type_data = {
            "thought": result["thought"],
            "action": result["action"],
            "response": response,
            "score": get_score(response, split_indicator) if split_indicator != "prob" else get_score(result["judge_probs"], split_indicator),
        }
        if split_indicator == "prob":
            type_data["judge_probs"] = result["judge_probs"]
        if result["type"] == "chosen":
            grouped_task_dict[task_name]["chosen"].append(type_data)
        elif result["type"] == "rejected":
            grouped_task_dict[task_name]["rejected"].append(type_data)

    return list(grouped_task_dict.values())


def processing_results(results, evaluation_mode: str, num_generate: int, use_batch: bool=False):
    if "judge_probs" in results[0]:
        split_indicator = "prob"
    else:
        if evaluation_mode == "judge_with_checklist_generation" or evaluation_mode == "judge_with_gt_checklist":
            split_indicator = "CHECKLIST EVALUATION"
        else:
            split_indicator = "SCORE"

    # if use_batch is True, make it flattened
    if use_batch:
        tmp_results = []
        for result in results:
            for d in result:
                tmp_results.append(d)
        grouped_results = group_by_task(tmp_results, split_indicator)
    else:
        grouped_results = group_by_task(results, split_indicator)

    mind2web_results = []
    webarena_results = []
    mind2web_task_results = []
    mind2web_website_results = []
    mind2web_domain_results = []

    for grouped_result in grouped_results:
        if "mind2web" in grouped_result["source_name"]:
            mind2web_results.append(grouped_result)
            if grouped_result["source_name"] == "mind2web_test_task":
                mind2web_task_results.append(grouped_result)
            elif grouped_result["source_name"] == "mind2web_test_website":
                mind2web_website_results.append(grouped_result)
            elif grouped_result["source_name"] == "mind2web_test_domain":
                mind2web_domain_results.append(grouped_result)
        elif "webarena" in grouped_result["source_name"]:
            webarena_results.append(grouped_result)

    try:
        final_stats = {
            "mind2web": {
                "MRR": {},
                "Accuracy": {},
                "Traj_Accuracy": {},
            },
            "webarena": {
                "MRR": {},
                "Accuracy": {},
                "Traj_Accuracy": {},
            },
            "mind2web_task": {
                "MRR": {},
                "Accuracy": {},
                "Traj_Accuracy": {},
            },
            "mind2web_website": {
                "MRR": {},
                "Accuracy": {},
                "Traj_Accuracy": {},
            },
            "mind2web_domain": {
                "MRR": {},
                "Accuracy": {},
                "Traj_Accuracy": {},
            },
        }
        for source_results in [
            ("mind2web", mind2web_results),
            ("webarena", webarena_results),
            ("mind2web_task", mind2web_task_results),
            ("mind2web_website", mind2web_website_results),
            ("mind2web_domain", mind2web_domain_results)
        ]:
            average_stats = calculate_stats(source_results[1], "average")
            self_consistency_stats = calculate_stats(source_results[1], "self_consistency")
            for metric in average_stats:
                final_stats[source_results[0]][metric]["Average"] = average_stats[metric]
            for metric in self_consistency_stats:
                final_stats[source_results[0]][metric]["Self_Consistency"] = self_consistency_stats[metric]

        if num_generate == 1:
            for source_name in final_stats:
                for metric in final_stats[source_name]:
                    print(f"{round(100 * final_stats[source_name][metric]['Average'], 2)}", end=", ")
            print()
        else:
            for agg_func in ["Average", "Self_Consistency"]:
                print(f"{agg_func}")
                for source_name in final_stats:
                    for metric in final_stats[source_name]:
                        print(f"{round(100 * final_stats[source_name][metric][agg_func], 2)}", end=", ")
                print()
    except Exception as e:
        print(e)
        return grouped_results, None

    # add function to convert json format results to html format results
    # TODO: implement this function
    # create_html_report(results, "results.html")
    return grouped_results, final_stats
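For intuition about get_acc_and_mrr above: the chosen score is ranked against the rejected scores, ties add a draw penalty to the rank, and a pair only counts as accurate when the chosen score strictly beats every rejected one. Toy values:

# chosen strictly beats both rejected scores: rank 1 -> MRR 1.0, accurate
print(get_acc_and_mrr(0.9, [0.4, 0.7]))  # (1.0, True)

# a tie with one rejected score adds a draw penalty: rank 2 -> MRR 0.5, not accurate
print(get_acc_and_mrr(0.7, [0.7, 0.2]))  # (0.5, False)

# no rejected candidates at all
print(get_acc_and_mrr(0.9, []))          # (0, False)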
agent/mini_bench/inference_utils.py ADDED (+87 lines)
import time

from multiprocessing import Process, Manager
from tqdm import tqdm


def worker_main(work_queue, result_queue, process_func, config):
    while True:
        item = work_queue.get()
        if item is None:
            result_queue.put(None)
            break
        try:
            results, cost = process_func(config, item)
            result_queue.put((results, cost))
        except Exception as e:
            item_info = item.get('idx', item.get('id', 'unknown item'))
            print(f"Error processing item {item_info}: {e}")
            result_queue.put(None)
        finally:
            work_queue.task_done()

def run_parallel_evaluation(dataset, process_func, config, num_workers, description):
    """
    Runs parallel evaluation on the given dataset and returns the results.

    Args:
        dataset (list or datasets.Dataset): Data to evaluate.
        process_func (callable): Function to process each data item.
        config (dict): Configuration for the process_func.
        num_workers (int): Number of worker processes to use.
        description (str): Description to display on the tqdm progress bar.

    Returns:
        tuple: (list of evaluation results, total cost)
    """
    manager = Manager()
    work_queue = manager.Queue()
    result_queue = manager.Queue()

    # Add data to the work queue
    dataset_list = list(dataset) if not isinstance(dataset, list) else dataset
    for data in dataset_list:
        work_queue.put(data)

    # Add termination signals for workers
    for _ in range(num_workers):
        work_queue.put(None)

    # Start parallel processing
    processes = []
    for _ in range(num_workers):
        p = Process(target=worker_main, args=(work_queue, result_queue, process_func, config))
        p.start()
        processes.append(p)

    # Show progress bar and collect results
    process_results = []
    process_cost = 0
    completed_workers = 0

    with tqdm(total=len(dataset_list), desc=description) as pbar:
        while completed_workers < num_workers:
            result_item = result_queue.get()
            if result_item is None:
                completed_workers += 1
            else:
                results, cost = result_item
                if results is not None:
                    process_results.append(results)
                process_cost += cost if cost is not None else 0
                pbar.update(1)

    # Wait for all processes to finish
    for p in processes:
        p.join()

    # Collect remaining results
    while not result_queue.empty():
        result_item = result_queue.get_nowait()
        if result_item is not None:
            results, cost = result_item
            if results is not None:
                process_results.append(results)
            process_cost += cost if cost is not None else 0

    return process_results, process_cost
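A minimal driver sketch for run_parallel_evaluation above; dummy_process is a stand-in for a real per-item evaluation function returning a (result, cost) tuple:

def dummy_process(config, item):
    # stand-in for a real evaluation: echo the item with a derived score
    return {"idx": item["idx"], "score": config["scale"] * item["idx"]}, 0.0

if __name__ == "__main__":
    dataset = [{"idx": i} for i in range(8)]
    results, total_cost = run_parallel_evaluation(
        dataset, dummy_process, {"scale": 2}, num_workers=2, description="demo"
    )
    print(len(results), total_cost)  # 8 results, zero accumulated cost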
agent/mini_bench/prompts/__init__.py ADDED (+1 line)
from .construct_messages import get_messages
agent/mini_bench/prompts/__pycache__/__init__.cpython-311.pyc ADDED (binary file, 263 Bytes)
agent/mini_bench/prompts/__pycache__/action.cpython-311.pyc ADDED (binary file, 2.85 kB)
agent/mini_bench/prompts/__pycache__/checklist_prompt.cpython-311.pyc ADDED (binary file, 3.11 kB)
agent/mini_bench/prompts/__pycache__/construct_messages.cpython-311.pyc ADDED (binary file, 15 kB)
agent/mini_bench/prompts/__pycache__/eval_type.cpython-311.pyc ADDED (binary file, 5.46 kB)
agent/mini_bench/prompts/__pycache__/image_utils.cpython-311.pyc ADDED (binary file, 1.71 kB)
agent/mini_bench/prompts/__pycache__/input_information.cpython-311.pyc ADDED (binary file, 1.03 kB)
agent/mini_bench/prompts/__pycache__/judge_prompt.cpython-311.pyc ADDED (binary file, 5.64 kB)