Commit 2812333 · Parent(s): 527d3c4
Alex committed

app fixed

app.py
CHANGED
@@ -1,6 +1,7 @@
 import json
 from pathlib import Path
 from typing import List, Dict
+import os
 
 import gradio as gr
 from pydantic import BaseModel, Field, field_validator
@@ -8,6 +9,20 @@ from pydantic import BaseModel, Field, field_validator
 # --------------- Configuration ---------------
 LEADERBOARD_PATH = Path("leaderboard_data.json")
 
+# Initialize with default data
+DEFAULT_DATA = [{
+    "model_name": "example/model",
+    "bleu": 0.5,
+    "llm_pass_1": 0.5,
+    "llm_pass_5": 0.5,
+    "llm_pass_10": 0.5,
+    "metrics": {
+        "readability": 5, "relevance": 5, "explanation_clarity": 5,
+        "problem_identification": 5, "actionability": 5, "completeness": 5,
+        "specificity": 5, "contextual_adequacy": 5, "consistency": 5, "brevity": 5
+    }
+}]
+
 # --------------- Data models ---------------
 class Metrics(BaseModel):
     readability: int
@@ -49,20 +64,8 @@ def _load_leaderboard() -> List[Dict]:
     """Load leaderboard data with persistent storage support."""
     if not LEADERBOARD_PATH.exists():
         # Create default example data
-        default_data = [{
-            "model_name": "example/model",
-            "bleu": 0.5,
-            "llm_pass_1": 0.5,
-            "llm_pass_5": 0.5,
-            "llm_pass_10": 0.5,
-            "metrics": {
-                "readability": 5, "relevance": 5, "explanation_clarity": 5,
-                "problem_identification": 5, "actionability": 5, "completeness": 5,
-                "specificity": 5, "contextual_adequacy": 5, "consistency": 5, "brevity": 5
-            }
-        }]
-        _save_leaderboard(default_data)
-        return default_data
+        _save_leaderboard(DEFAULT_DATA)
+        return DEFAULT_DATA
 
     try:
         with LEADERBOARD_PATH.open("r", encoding="utf-8") as f:
@@ -85,9 +88,10 @@ def _save_leaderboard(data: List[Dict]):
 
 # --------------- Table data functions ---------------
 
-def _table_data() -> List[List]:
+def _table_data(data: List[Dict] = None) -> List[List]:
     """Get main metrics table data."""
-    data = _load_leaderboard()
+    if data is None:
+        data = _load_leaderboard()
     if not data:
         return []
     data.sort(key=lambda x: x["llm_pass_1"], reverse=True)
@@ -105,9 +109,10 @@
     return table_rows
 
 
-def _multimetric_table_data() -> List[List]:
+def _multimetric_table_data(data: List[Dict] = None) -> List[List]:
     """Get multi-metric table data."""
-    data = _load_leaderboard()
+    if data is None:
+        data = _load_leaderboard()
     if not data:
         return []
     data.sort(key=lambda x: x["llm_pass_1"], reverse=True)
@@ -134,6 +139,7 @@ def _multimetric_table_data() -> List[List]:
 # --------------- Gradio callbacks ---------------
 
 def submit_model(
+    current_data: List[Dict],
     model_name: str,
     bleu: float,
     llm_pass_1: float,
@@ -172,15 +178,16 @@ def submit_model(
             },
         )
     except Exception as e:
-        return _table_data(), _multimetric_table_data(), f"❌ Submission failed: {e}"
+        return current_data, _table_data(current_data), _multimetric_table_data(current_data), f"❌ Submission failed: {e}"
 
-    data = _load_leaderboard()
+    # Use current data from state
+    data = current_data.copy() if current_data else []
     # Replace existing model entry if any
     data = [d for d in data if d["model_name"] != entry.model_name]
     data.append(entry.dict())
     _save_leaderboard(data)
 
-    return _table_data(), _multimetric_table_data(), "✅ Submission recorded!"
+    return data, _table_data(data), _multimetric_table_data(data), "✅ Submission recorded!"
 
 
 # --------------- Interface ---------------
@@ -188,8 +195,12 @@ with gr.Blocks(title="CodeReview Leaderboard") as demo:
     gr.Markdown("""# π CodeReview Leaderboard\nSubmit your model results below. Leaderboard is sorted by **Pass@1**. """)
 
     # Initialize table data
-    initial_data = _table_data()
-    initial_multimetric_data = _multimetric_table_data()
+    initial_leaderboard_data = _load_leaderboard()
+    initial_data = _table_data(initial_leaderboard_data)
+    initial_multimetric_data = _multimetric_table_data(initial_leaderboard_data)
+
+    # State to store leaderboard data
+    leaderboard_state = gr.State(value=initial_leaderboard_data)
 
     leaderboard_df = gr.Dataframe(
         headers=["Model", "BLEU", "Pass@1", "Pass@5", "Pass@10"],
@@ -234,6 +245,7 @@ with gr.Blocks(title="CodeReview Leaderboard") as demo:
     submit_btn.click(
         fn=submit_model,
        inputs=[
+            leaderboard_state,
             model_name_inp,
             bleu_inp,
             pass1_inp,
@@ -250,7 +262,7 @@ with gr.Blocks(title="CodeReview Leaderboard") as demo:
             consistency_inp,
             brevity_inp,
         ],
-        outputs=[leaderboard_df, multimetric_df, status_markdown],
+        outputs=[leaderboard_state, leaderboard_df, multimetric_df, status_markdown],
         api_name="submit_model",
     )
 
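
Note: the fix threads the leaderboard through a gr.State value instead of having each table function and callback re-read leaderboard_data.json. A minimal, self-contained sketch of that pattern (the component names here are illustrative, not taken from app.py):

import gradio as gr

# Sketch of the state-threading pattern the commit adopts: a gr.State holds
# the canonical data for the session; the callback takes it as its first
# input and returns the updated value as its first output.
def add_entry(entries, name):
    entries = (entries or []) + [name]  # update the session's copy
    return entries, "\n".join(entries)  # new state first, then display value

with gr.Blocks() as demo:
    entries_state = gr.State(value=[])  # per-session storage across callbacks
    name_inp = gr.Textbox(label="Name")
    listing = gr.Markdown()
    add_btn = gr.Button("Add")
    add_btn.click(fn=add_entry, inputs=[entries_state, name_inp],
                  outputs=[entries_state, listing])

if __name__ == "__main__":
    demo.launch()

Returning the data alongside the rendered tables is what keeps leaderboard_df and multimetric_df consistent with the submission that was just recorded, without an extra disk read per render.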
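
_save_leaderboard itself sits outside the hunks shown above. For context, a plausible counterpart to the _load_leaderboard reader, assuming plain JSON persistence; the write-then-rename detail is an assumption, not something the diff shows:

import json
from pathlib import Path
from typing import Dict, List

LEADERBOARD_PATH = Path("leaderboard_data.json")

def _save_leaderboard(data: List[Dict]) -> None:
    """Hypothetical implementation; the actual body is not part of this commit."""
    tmp = LEADERBOARD_PATH.with_suffix(".tmp")  # write to a sibling temp file first
    with tmp.open("w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
    tmp.replace(LEADERBOARD_PATH)  # rename over the target; atomic on POSIX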