Alex committed
Commit 2017254 · 1 Parent(s): a72a723
app.py CHANGED
@@ -1,276 +1,192 @@
1
- import gradio as gr
2
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
3
- import pandas as pd
4
- from apscheduler.schedulers.background import BackgroundScheduler
5
- from huggingface_hub import snapshot_download
6
- from fastapi import FastAPI
7
- from src.api_submit_results import router as submission_router
8
-
9
- from src.about import (
10
- CITATION_BUTTON_LABEL,
11
- CITATION_BUTTON_TEXT,
12
- EVALUATION_QUEUE_TEXT,
13
- INTRODUCTION_TEXT,
14
- LLM_BENCHMARKS_TEXT,
15
- TITLE,
16
- )
17
- from src.display.css_html_js import custom_css
18
- from src.display.utils import (
19
- BENCHMARK_COLS,
20
- COLS,
21
- EVAL_COLS,
22
- EVAL_TYPES,
23
- AutoEvalColumn,
24
- ModelType,
25
- fields,
26
- WeightType,
27
- Precision
28
- )
29
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
30
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
31
- from src.submission.submit import add_new_eval, add_manual_results
32
-
33
-
34
- def restart_space():
35
- API.restart_space(repo_id=REPO_ID)
36
-
37
- ### Space initialisation
38
- try:
39
- print(EVAL_REQUESTS_PATH)
40
- snapshot_download(
41
- repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
42
- )
43
- except Exception:
44
- restart_space()
45
- try:
46
- print(EVAL_RESULTS_PATH)
47
- snapshot_download(
48
- repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
49
- )
50
- except Exception:
51
- restart_space()
52
-
53
 
54
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
55
-
56
- (
57
- finished_eval_queue_df,
58
- running_eval_queue_df,
59
- pending_eval_queue_df,
60
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
61
-
62
- def init_leaderboard(dataframe):
63
- if dataframe is None or dataframe.empty:
64
- raise ValueError("Leaderboard DataFrame is empty or None.")
65
- return Leaderboard(
66
- value=dataframe,
67
- datatype=[c.type for c in fields(AutoEvalColumn)],
68
- select_columns=SelectColumns(
69
- default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
70
- cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
71
- label="Select Columns to Display:",
72
- ),
73
- search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
74
- hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
75
- filter_columns=[
76
- ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
77
- ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
78
- ColumnFilter(
79
- AutoEvalColumn.params.name,
80
- type="slider",
81
- min=0.01,
82
- max=150,
83
- label="Select the number of parameters (B)",
84
- ),
85
- ColumnFilter(
86
- AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
87
- ),
88
- ],
89
- bool_checkboxgroup_label="Hide models",
90
  interactive=False,
91
  )
92
 
93
-
94
- demo = gr.Blocks(css=custom_css)
95
- with demo:
96
- gr.HTML(TITLE)
97
- gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
98
-
99
- with gr.Tabs(elem_classes="tab-buttons") as tabs:
100
- with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
101
- leaderboard = init_leaderboard(LEADERBOARD_DF)
102
-
103
- with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
104
- gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
105
-
106
- with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
107
- with gr.Column():
108
- with gr.Row():
109
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
110
-
111
- with gr.Column():
112
- with gr.Accordion(
113
- f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
114
- open=False,
115
- ):
116
- with gr.Row():
117
- finished_eval_table = gr.components.Dataframe(
118
- value=finished_eval_queue_df,
119
- headers=EVAL_COLS,
120
- datatype=EVAL_TYPES,
121
- row_count=5,
122
- )
123
- with gr.Accordion(
124
- f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
125
- open=False,
126
- ):
127
- with gr.Row():
128
- running_eval_table = gr.components.Dataframe(
129
- value=running_eval_queue_df,
130
- headers=EVAL_COLS,
131
- datatype=EVAL_TYPES,
132
- row_count=5,
133
- )
134
-
135
- with gr.Accordion(
136
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
137
- open=False,
138
- ):
139
- with gr.Row():
140
- pending_eval_table = gr.components.Dataframe(
141
- value=pending_eval_queue_df,
142
- headers=EVAL_COLS,
143
- datatype=EVAL_TYPES,
144
- row_count=5,
145
- )
146
- with gr.Row():
147
- gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
148
-
149
- with gr.Row():
150
- with gr.Column():
151
- model_name_textbox = gr.Textbox(label="Model name")
152
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
153
- model_type = gr.Dropdown(
154
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
155
- label="Model type",
156
- multiselect=False,
157
- value=None,
158
- interactive=True,
159
- )
160
-
161
- with gr.Column():
162
- precision = gr.Dropdown(
163
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
164
- label="Precision",
165
- multiselect=False,
166
- value="float16",
167
- interactive=True,
168
- )
169
- weight_type = gr.Dropdown(
170
- choices=[i.value.name for i in WeightType],
171
- label="Weights type",
172
- multiselect=False,
173
- value="Original",
174
- interactive=True,
175
- )
176
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
177
-
178
- submit_button = gr.Button("Submit Eval")
179
- submission_result = gr.Markdown()
180
- submit_button.click(
181
- add_new_eval,
182
- [
183
- model_name_textbox,
184
- base_model_name_textbox,
185
- revision_name_textbox,
186
- precision,
187
- weight_type,
188
- model_type,
189
- ],
190
- submission_result,
191
- )
192
-
193
- # ----------------------------------------------------
194
- # Manual metrics submission form
195
- # ----------------------------------------------------
196
- gr.Markdown("## 📝 Submit metrics manually (advanced)")
197
-
198
- with gr.Row():
199
- with gr.Column():
200
- model_name_metrics = gr.Textbox(label="Model name", placeholder="org/model")
201
- revision_metrics = gr.Textbox(label="Revision commit", placeholder="main", value="main")
202
- bleu_input = gr.Number(label="BLEU", value=0.5)
203
- pass1_input = gr.Number(label="Pass@1", value=0.5, minimum=0.0, maximum=1.0)
204
- pass5_input = gr.Number(label="Pass@5", value=0.5, minimum=0.0, maximum=1.0)
205
- pass10_input = gr.Number(label="Pass@10", value=0.5, minimum=0.0, maximum=1.0)
206
-
207
- with gr.Column():
208
- # Subjective metrics sliders (0-5)
209
- readability_slider = gr.Slider(0, 5, step=1, value=3, label="Readability")
210
- relevance_slider = gr.Slider(0, 5, step=1, value=3, label="Relevance")
211
- explanation_slider = gr.Slider(0, 5, step=1, value=3, label="Explanation clarity")
212
- problem_slider = gr.Slider(0, 5, step=1, value=3, label="Problem identification")
213
- actionability_slider = gr.Slider(0, 5, step=1, value=3, label="Actionability")
214
- completeness_slider = gr.Slider(0, 5, step=1, value=3, label="Completeness")
215
- specificity_slider = gr.Slider(0, 5, step=1, value=3, label="Specificity")
216
- contextual_slider = gr.Slider(0, 5, step=1, value=3, label="Contextual adequacy")
217
- consistency_slider = gr.Slider(0, 5, step=1, value=3, label="Consistency")
218
- brevity_slider = gr.Slider(0, 5, step=1, value=3, label="Brevity")
219
-
220
- submit_metrics_button = gr.Button("Submit Metrics")
221
- metrics_submission_result = gr.Markdown()
222
-
223
- submit_metrics_button.click(
224
- add_manual_results,
225
- [
226
- model_name_metrics,
227
- revision_metrics,
228
- bleu_input,
229
- readability_slider,
230
- relevance_slider,
231
- explanation_slider,
232
- problem_slider,
233
- actionability_slider,
234
- completeness_slider,
235
- specificity_slider,
236
- contextual_slider,
237
- consistency_slider,
238
- brevity_slider,
239
- pass1_input,
240
- pass5_input,
241
- pass10_input,
242
- ],
243
- metrics_submission_result,
244
- )
245
-
246
- with gr.Row():
247
- with gr.Accordion("📙 Citation", open=False):
248
- citation_button = gr.Textbox(
249
- value=CITATION_BUTTON_TEXT,
250
- label=CITATION_BUTTON_LABEL,
251
- lines=20,
252
- elem_id="citation-button",
253
- show_copy_button=True,
254
- )
255
-
256
- # ------------------------------
257
- # Start background scheduler
258
- # ------------------------------
259
- scheduler = BackgroundScheduler()
260
- scheduler.add_job(restart_space, "interval", seconds=1800)
261
- scheduler.start()
262
-
263
- # ------------------------------
264
- # Mount Gradio UI into FastAPI application
265
- # ------------------------------
266
- # Removed direct .launch(); Gradio UI will be served via the mounted FastAPI `app`.
267
-
268
- # ------------------ FastAPI mounting ------------------
269
- backend = FastAPI()
270
- backend.include_router(submission_router)
271
-
272
- # Enable queuing (same limit as before)
273
- demo = demo.queue(default_concurrency_limit=40)
274
-
275
- # Expose `app` for the HF Spaces runtime
276
- app = gr.mount_gradio_app(backend, demo, path="/")
 
1
+ import json
2
+ from pathlib import Path
3
+ from typing import List, Dict
4
 
5
+ import gradio as gr
6
+ from pydantic import BaseModel, Field, validator
7
+
8
+ # --------------- Configuration ---------------
9
+ LEADERBOARD_PATH = Path("leaderboard_data.json")
10
+ DEFAULT_MODEL_NAME = "example/model"
11
+
12
+ # --------------- Data models ---------------
13
+ class Metrics(BaseModel):
14
+ readability: float
15
+ relevance: float
16
+ explanation_clarity: float
17
+ problem_identification: float
18
+ actionability: float
19
+ completeness: float
20
+ specificity: float
21
+ contextual_adequacy: float
22
+ consistency: float
23
+ brevity: float
24
+
25
+
26
+ class LeaderboardEntry(BaseModel):
27
+ model_name: str
28
+ bleu: float
29
+ llm_pass_1: float
30
+ llm_pass_5: float
31
+ llm_pass_10: float
32
+ metrics: Metrics
33
+
34
+ @validator("bleu", "llm_pass_1", "llm_pass_5", "llm_pass_10", each_item=True)
35
+ def score_range(cls, v: float):
36
+ if not 0.0 <= v <= 1.0:
37
+ raise ValueError("Scores should be between 0 and 1")
38
+ return v
39
+
40
+
41
+ # --------------- Persistence helpers ---------------
42
+
43
+ def _load_leaderboard() -> List[Dict]:
44
+ if not LEADERBOARD_PATH.exists():
45
+ return []
46
+ with LEADERBOARD_PATH.open("r", encoding="utf-8") as f:
47
+ data = json.load(f)
48
+ return data.get("leaderboard", [])
49
+
50
+
51
+ def _save_leaderboard(data: List[Dict]):
52
+ to_store = {"leaderboard": data}
53
+ with LEADERBOARD_PATH.open("w", encoding="utf-8") as f:
54
+ json.dump(to_store, f, indent=2)
55
+
56
+
57
+ # --------------- Utility ---------------
58
+
59
+ def _flatten_entry(entry: Dict) -> Dict:
60
+ """Flatten nested metrics so that every metric is a column."""
61
+ flat = {
62
+ "Model": entry["model_name"],
63
+ "BLEU": entry["bleu"],
64
+ "Pass@1": entry["llm_pass_1"],
65
+ "Pass@5": entry["llm_pass_5"],
66
+ "Pass@10": entry["llm_pass_10"],
67
+ }
68
+ for metric_name, score in entry["metrics"].items():
69
+ flat[metric_name.replace("_", " ").title()] = score
70
+ return flat
71
+
72
+
73
+ def _table_data() -> List[Dict]:
74
+ data = _load_leaderboard()
75
+ # Sort descending by pass@1 as requested
76
+ data.sort(key=lambda x: x["llm_pass_1"], reverse=True)
77
+ return [_flatten_entry(e) for e in data]
78
+
79
+
80
+ # --------------- Gradio callbacks ---------------
81
+
82
+ def submit_model(
83
+ model_name: str,
84
+ bleu: float,
85
+ llm_pass_1: float,
86
+ llm_pass_5: float,
87
+ llm_pass_10: float,
88
+ readability: float,
89
+ relevance: float,
90
+ explanation_clarity: float,
91
+ problem_identification: float,
92
+ actionability: float,
93
+ completeness: float,
94
+ specificity: float,
95
+ contextual_adequacy: float,
96
+ consistency: float,
97
+ brevity: float,
98
+ ):
99
+ """Validate and append a new model entry to the leaderboard."""
100
+ try:
101
+ entry = LeaderboardEntry(
102
+ model_name=model_name.strip(),
103
+ bleu=bleu,
104
+ llm_pass_1=llm_pass_1,
105
+ llm_pass_5=llm_pass_5,
106
+ llm_pass_10=llm_pass_10,
107
+ metrics={
108
+ "readability": readability,
109
+ "relevance": relevance,
110
+ "explanation_clarity": explanation_clarity,
111
+ "problem_identification": problem_identification,
112
+ "actionability": actionability,
113
+ "completeness": completeness,
114
+ "specificity": specificity,
115
+ "contextual_adequacy": contextual_adequacy,
116
+ "consistency": consistency,
117
+ "brevity": brevity,
118
+ },
119
+ )
120
+ except Exception as e:
121
+ return gr.update(value=_table_data()), gr.update(value=f"❌ Submission failed: {e}")
122
+
123
+ data = _load_leaderboard()
124
+ # Replace existing model entry if any
125
+ data = [d for d in data if d["model_name"] != entry.model_name]
126
+ data.append(entry.dict())
127
+ _save_leaderboard(data)
128
+
129
+ return gr.update(value=_table_data()), gr.update(value="✅ Submission recorded!")
130
+
131
+
132
+ # --------------- Interface ---------------
133
+ with gr.Blocks(title="Custom LLM Leaderboard") as demo:
134
+ gr.Markdown("""# 🏆 LLM Leaderboard\nSubmit your model results below. Leaderboard is sorted by **Pass@1**. """)
135
+
136
+ leaderboard_df = gr.Dataframe(
137
+ headers=list(_table_data()[0].keys()) if _table_data() else [],
138
+ value=_table_data(),
139
+ label="Current Leaderboard",
140
  interactive=False,
141
  )
142
 
143
+ gr.Markdown("## 🔄 Submit new model results")
144
+
145
+ with gr.Accordion("Submission form", open=False):
146
+ with gr.Row():
147
+ model_name_inp = gr.Text(label="Model name (org/model)", value="")
148
+ bleu_inp = gr.Number(label="BLEU", value=0.0, minimum=0.0, maximum=1.0)
149
+ pass1_inp = gr.Number(label="Pass@1", value=0.0, minimum=0.0, maximum=1.0)
150
+ pass5_inp = gr.Number(label="Pass@5", value=0.0, minimum=0.0, maximum=1.0)
151
+ pass10_inp = gr.Number(label="Pass@10", value=0.0, minimum=0.0, maximum=1.0)
152
+
153
+ gr.Markdown("### Multi-metric subjective scores (0.0 – 1.0)")
154
+ with gr.Row():
155
+ readability_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Readability")
156
+ relevance_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Relevance")
157
+ explanation_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Explanation Clarity")
158
+ problem_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Problem Identification")
159
+ actionability_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Actionability")
160
+ completeness_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Completeness")
161
+ specificity_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Specificity")
162
+ contextual_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Contextual Adequacy")
163
+ consistency_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Consistency")
164
+ brevity_inp = gr.Slider(minimum=0.0, maximum=1.0, value=0.0, step=0.05, label="Brevity")
165
+
166
+ submit_btn = gr.Button("Submit")
167
+ status_markdown = gr.Markdown("")
168
+
169
+ submit_btn.click(
170
+ fn=submit_model,
171
+ inputs=[
172
+ model_name_inp,
173
+ bleu_inp,
174
+ pass1_inp,
175
+ pass5_inp,
176
+ pass10_inp,
177
+ readability_inp,
178
+ relevance_inp,
179
+ explanation_inp,
180
+ problem_inp,
181
+ actionability_inp,
182
+ completeness_inp,
183
+ specificity_inp,
184
+ contextual_inp,
185
+ consistency_inp,
186
+ brevity_inp,
187
+ ],
188
+ outputs=[leaderboard_df, status_markdown],
189
+ )
190
+
191
+ # Expose app variable for Spaces
192
+ app = demo
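
The new `submit_model` callback can also be exercised without the UI. A minimal sketch, assuming `app.py` is importable as the module `app` (importing it builds the Blocks but does not launch a server) and using illustrative scores; note that a successful call rewrites `leaderboard_data.json`:

```python
# Sketch: call the new submission callback directly (values are illustrative).
from app import submit_model, _table_data

table_update, status_update = submit_model(
    "my-org/my-model", 0.31,    # model name, BLEU (0-1 under the new validator)
    0.42, 0.55, 0.60,           # Pass@1, Pass@5, Pass@10
    0.8, 0.7, 0.75, 0.6, 0.65,  # readability .. actionability
    0.7, 0.5, 0.6, 0.7, 0.55,   # completeness .. brevity
)
print(status_update["value"])   # "✅ Submission recorded!" on success
print(len(_table_data()))       # leaderboard now contains the new entry
```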
 
leaderboard_data.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "leaderboard": [
3
+ {
4
+ "model_name": "example/model",
5
+ "bleu": 0.5,
6
+ "llm_pass_1": 0.5,
7
+ "llm_pass_5": 0.5,
8
+ "llm_pass_10": 0.5,
9
+ "metrics": {
10
+ "readability": 0.5,
11
+ "relevance": 0.5,
12
+ "explanation_clarity": 0.5,
13
+ "problem_identification": 0.5,
14
+ "actionability": 0.5,
15
+ "completeness": 0.5,
16
+ "specificity": 0.5,
17
+ "contextual_adequacy": 0.5,
18
+ "consistency": 0.5,
19
+ "brevity": 0.5
20
+ }
21
+ }
22
+ ]
23
+ }
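
The bundled `leaderboard_data.json` mirrors the `LeaderboardEntry`/`Metrics` pydantic models defined in the new `app.py`. As a sanity check, a minimal sketch (again assuming `app.py` is importable as the module `app`) validates every stored entry through those models:

```python
import json
from app import LeaderboardEntry  # defined in the new app.py

with open("leaderboard_data.json", "r", encoding="utf-8") as f:
    raw_entries = json.load(f)["leaderboard"]

# Each dict is range-checked by the validators in app.py (scores must be 0-1).
entries = [LeaderboardEntry(**item) for item in raw_entries]
print(entries[0].model_name, entries[0].llm_pass_1)  # example/model 0.5
```
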
src/about.py DELETED
@@ -1,91 +0,0 @@
1
- from dataclasses import dataclass
2
- from enum import Enum
3
-
4
- @dataclass
5
- class Task:
6
- benchmark: str
7
- metric: str
8
- col_name: str
9
-
10
-
11
- # Select your metrics here
12
- # ---------------------------------------------------
13
- # Each entry: first argument is the key inside "results" dict in the result JSON,
14
- # second is the metric key inside that sub-dict (we use "score" everywhere for uniformity),
15
- # third is the column name displayed in the leaderboard.
16
-
17
- class Tasks(Enum):
18
- bleu = Task("bleu", "score", "BLEU ⬆️")
19
- multimetric = Task("multimetric", "score", "Multimetric ⬆️")
20
-
21
- readability = Task("readability", "score", "Readability")
22
- relevance = Task("relevance", "score", "Relevance")
23
- explanation_clarity = Task("explanation_clarity", "score", "Explanation clarity")
24
- problem_identification = Task("problem_identification", "score", "Problem identification")
25
- actionability = Task("actionability", "score", "Actionability")
26
- completeness = Task("completeness", "score", "Completeness")
27
- specificity = Task("specificity", "score", "Specificity")
28
- contextual_adequacy = Task("contextual_adequacy", "score", "Contextual adequacy")
29
- consistency = Task("consistency", "score", "Consistency")
30
- brevity = Task("brevity", "score", "Brevity")
31
-
32
- pass_at_1 = Task("pass_at_1", "score", "Pass@1 ⬆️")
33
- pass_at_5 = Task("pass_at_5", "score", "Pass@5")
34
- pass_at_10 = Task("pass_at_10", "score", "Pass@10")
35
-
36
-
37
- NUM_FEWSHOT = 0 # Not applicable here but kept for compatibility
38
- # ---------------------------------------------------
39
-
40
-
41
-
42
- # Your leaderboard name
43
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
44
-
45
- # What does your leaderboard evaluate?
46
- INTRODUCTION_TEXT = """
47
- Intro text
48
- """
49
-
50
- # Which evaluations are you running? how can people reproduce what you have?
51
- LLM_BENCHMARKS_TEXT = f"""
52
- ## How it works
53
-
54
- ## Reproducibility
55
- To reproduce our results, here is the commands you can run:
56
-
57
- """
58
-
59
- EVALUATION_QUEUE_TEXT = """
60
- ## Some good practices before submitting a model
61
-
62
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
63
- ```python
64
- from transformers import AutoConfig, AutoModel, AutoTokenizer
65
- config = AutoConfig.from_pretrained("your model name", revision=revision)
66
- model = AutoModel.from_pretrained("your model name", revision=revision)
67
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
68
- ```
69
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
70
-
71
- Note: make sure your model is public!
72
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
73
-
74
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
75
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
76
-
77
- ### 3) Make sure your model has an open license!
78
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
79
-
80
- ### 4) Fill up your model card
81
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
82
-
83
- ## In case of model failure
84
- If your model is displayed in the `FAILED` category, its execution stopped.
85
- Make sure you have followed the above steps first.
86
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
87
- """
88
-
89
- CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
90
- CITATION_BUTTON_TEXT = r"""
91
- """
 
src/api_submit_results.py DELETED
@@ -1,116 +0,0 @@
1
- from datetime import datetime, timezone
2
- import json
3
- import os
4
- import uuid
5
-
6
- from fastapi import APIRouter, HTTPException
7
- from pydantic import BaseModel, Field, validator
8
-
9
- from src.envs import API, RESULTS_REPO, EVAL_RESULTS_PATH, TOKEN
10
-
11
- router = APIRouter(prefix="/api", tags=["submission"])
12
-
13
- ALL_SUBJECTIVE_FIELDS = [
14
- "readability",
15
- "relevance",
16
- "explanation_clarity",
17
- "problem_identification",
18
- "actionability",
19
- "completeness",
20
- "specificity",
21
- "contextual_adequacy",
22
- "consistency",
23
- "brevity",
24
- ]
25
-
26
-
27
- class ResultPayload(BaseModel):
28
- model: str = Field(..., description="Model id on the Hub (e.g. org/model)")
29
- revision: str = Field("main", description="Commit sha or branch (default: main)")
30
- bleu: float = Field(..., ge=0, description="BLEU score (0-100)")
31
-
32
- # 10 subjective metrics 0-5
33
- readability: int = Field(..., ge=0, le=5)
34
- relevance: int = Field(..., ge=0, le=5)
35
- explanation_clarity: int = Field(..., ge=0, le=5)
36
- problem_identification: int = Field(..., ge=0, le=5)
37
- actionability: int = Field(..., ge=0, le=5)
38
- completeness: int = Field(..., ge=0, le=5)
39
- specificity: int = Field(..., ge=0, le=5)
40
- contextual_adequacy: int = Field(..., ge=0, le=5)
41
- consistency: int = Field(..., ge=0, le=5)
42
- brevity: int = Field(..., ge=0, le=5)
43
-
44
- pass_at_1: float = Field(..., ge=0, le=1)
45
- pass_at_5: float = Field(..., ge=0, le=1)
46
- pass_at_10: float = Field(..., ge=0, le=1)
47
-
48
- @validator("pass_at_5")
49
- def _p5_ge_p1(cls, v, values):
50
- if "pass_at_1" in values and v < values["pass_at_1"]:
51
- raise ValueError("pass@5 must be >= pass@1")
52
- return v
53
-
54
- @validator("pass_at_10")
55
- def _p10_ge_p5(cls, v, values):
56
- if "pass_at_5" in values and v < values["pass_at_5"]:
57
- raise ValueError("pass@10 must be >= pass@5")
58
- return v
59
-
60
- def multimetric(self) -> float:
61
- total = sum(getattr(self, f) for f in ALL_SUBJECTIVE_FIELDS)
62
- return float(total) / len(ALL_SUBJECTIVE_FIELDS)
63
-
64
-
65
- @router.post("/submit", status_code=200)
66
- async def submit_results(payload: ResultPayload):
67
- """Accept new evaluation results and push them to the results dataset."""
68
-
69
- # Prepare JSON in expected format (compatible with read_evals.py)
70
- results_dict = {
71
- "config": {
72
- "model_dtype": "unknown",
73
- "model_name": payload.model,
74
- "model_sha": payload.revision,
75
- },
76
- "results": {},
77
- }
78
-
79
- # Primary metrics
80
- results_dict["results"]["bleu"] = {"score": payload.bleu}
81
- results_dict["results"]["multimetric"] = {"score": payload.multimetric()}
82
-
83
- # Subjective metrics
84
- for field in ALL_SUBJECTIVE_FIELDS:
85
- results_dict["results"][field] = {"score": getattr(payload, field)}
86
-
87
- # Pass@k metrics
88
- results_dict["results"]["pass_at_1"] = {"score": payload.pass_at_1}
89
- results_dict["results"]["pass_at_5"] = {"score": payload.pass_at_5}
90
- results_dict["results"]["pass_at_10"] = {"score": payload.pass_at_10}
91
-
92
- # File handling
93
- os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
94
- ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
95
- unique_id = uuid.uuid4().hex[:8]
96
- filename = f"results_{payload.model.replace('/', '_')}_{ts}_{unique_id}.json"
97
- local_path = os.path.join(EVAL_RESULTS_PATH, filename)
98
-
99
- with open(local_path, "w") as fp:
100
- json.dump(results_dict, fp)
101
-
102
- try:
103
- API.upload_file(
104
- path_or_fileobj=local_path,
105
- path_in_repo=filename,
106
- repo_id=RESULTS_REPO,
107
- repo_type="dataset",
108
- commit_message=f"Add results for {payload.model}",
109
- )
110
- except Exception as e:
111
- raise HTTPException(status_code=500, detail=f"Failed to upload results: {e}")
112
- finally:
113
- if os.path.exists(local_path):
114
- os.remove(local_path)
115
-
116
- return {"status": "ok", "detail": "Results submitted."}
 
src/display/css_html_js.py DELETED
@@ -1,105 +0,0 @@
1
- custom_css = """
2
-
3
- .markdown-text {
4
- font-size: 16px !important;
5
- }
6
-
7
- #models-to-add-text {
8
- font-size: 18px !important;
9
- }
10
-
11
- #citation-button span {
12
- font-size: 16px !important;
13
- }
14
-
15
- #citation-button textarea {
16
- font-size: 16px !important;
17
- }
18
-
19
- #citation-button > label > button {
20
- margin: 6px;
21
- transform: scale(1.3);
22
- }
23
-
24
- #leaderboard-table {
25
- margin-top: 15px
26
- }
27
-
28
- #leaderboard-table-lite {
29
- margin-top: 15px
30
- }
31
-
32
- #search-bar-table-box > div:first-child {
33
- background: none;
34
- border: none;
35
- }
36
-
37
- #search-bar {
38
- padding: 0px;
39
- }
40
-
41
- /* Limit the width of the first AutoEvalColumn so that names don't expand too much */
42
- #leaderboard-table td:nth-child(2),
43
- #leaderboard-table th:nth-child(2) {
44
- max-width: 400px;
45
- overflow: auto;
46
- white-space: nowrap;
47
- }
48
-
49
- .tab-buttons button {
50
- font-size: 20px;
51
- }
52
-
53
- #scale-logo {
54
- border-style: none !important;
55
- box-shadow: none;
56
- display: block;
57
- margin-left: auto;
58
- margin-right: auto;
59
- max-width: 600px;
60
- }
61
-
62
- #scale-logo .download {
63
- display: none;
64
- }
65
- #filter_type{
66
- border: 0;
67
- padding-left: 0;
68
- padding-top: 0;
69
- }
70
- #filter_type label {
71
- display: flex;
72
- }
73
- #filter_type label > span{
74
- margin-top: var(--spacing-lg);
75
- margin-right: 0.5em;
76
- }
77
- #filter_type label > .wrap{
78
- width: 103px;
79
- }
80
- #filter_type label > .wrap .wrap-inner{
81
- padding: 2px;
82
- }
83
- #filter_type label > .wrap .wrap-inner input{
84
- width: 1px
85
- }
86
- #filter-columns-type{
87
- border:0;
88
- padding:0.5;
89
- }
90
- #filter-columns-size{
91
- border:0;
92
- padding:0.5;
93
- }
94
- #box-filter > .form{
95
- border: 0
96
- }
97
- """
98
-
99
- get_window_url_params = """
100
- function(url_params) {
101
- const params = new URLSearchParams(window.location.search);
102
- url_params = Object.fromEntries(params);
103
- return url_params;
104
- }
105
- """
 
src/display/formatting.py DELETED
@@ -1,27 +0,0 @@
1
- def model_hyperlink(link, model_name):
2
- return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
3
-
4
-
5
- def make_clickable_model(model_name):
6
- link = f"https://huggingface.co/{model_name}"
7
- return model_hyperlink(link, model_name)
8
-
9
-
10
- def styled_error(error):
11
- return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
12
-
13
-
14
- def styled_warning(warn):
15
- return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
16
-
17
-
18
- def styled_message(message):
19
- return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
20
-
21
-
22
- def has_no_nan_values(df, columns):
23
- return df[columns].notna().all(axis=1)
24
-
25
-
26
- def has_nan_values(df, columns):
27
- return df[columns].isna().any(axis=1)
 
src/display/utils.py DELETED
@@ -1,116 +0,0 @@
1
- from dataclasses import dataclass, make_dataclass
2
- from enum import Enum
3
-
4
- import pandas as pd
5
-
6
- from src.about import Tasks
7
-
8
- def fields(raw_class):
9
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
10
-
11
-
12
- # These classes are for user facing column names,
13
- # to avoid having to change them all around the code
14
- # when a modif is needed
15
- @dataclass
16
- class ColumnContent:
17
- name: str
18
- type: str
19
- displayed_by_default: bool
20
- hidden: bool = False
21
- never_hidden: bool = False
22
-
23
- ## Leaderboard columns
24
- auto_eval_column_dict = []
25
- # Init
26
- auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
27
- auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
28
- # Average kept but not displayed by default
29
- auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", False)])
30
-
31
- # Determine which metrics are visible by default
32
- _DEFAULT_VISIBLE = {"bleu", "multimetric", "pass_at_1", "pass_at_5", "pass_at_10"}
33
-
34
- for task in Tasks:
35
- show = task.name in _DEFAULT_VISIBLE
36
- auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", show)])
37
-
38
- # Model information
39
- auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
40
- auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
41
- auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
42
- auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
43
- auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
44
- auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
45
- auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
46
- auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
47
- auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
48
-
49
- # We use make dataclass to dynamically fill the scores from Tasks
50
- AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
51
-
52
- ## For the queue columns in the submission tab
53
- @dataclass(frozen=True)
54
- class EvalQueueColumn: # Queue column
55
- model = ColumnContent("model", "markdown", True)
56
- revision = ColumnContent("revision", "str", True)
57
- private = ColumnContent("private", "bool", True)
58
- precision = ColumnContent("precision", "str", True)
59
- weight_type = ColumnContent("weight_type", "str", "Original")
60
- status = ColumnContent("status", "str", True)
61
-
62
- ## All the model information that we might need
63
- @dataclass
64
- class ModelDetails:
65
- name: str
66
- display_name: str = ""
67
- symbol: str = "" # emoji
68
-
69
-
70
- class ModelType(Enum):
71
- PT = ModelDetails(name="pretrained", symbol="🟢")
72
- FT = ModelDetails(name="fine-tuned", symbol="🔶")
73
- IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
74
- RL = ModelDetails(name="RL-tuned", symbol="🟦")
75
- Unknown = ModelDetails(name="", symbol="?")
76
-
77
- def to_str(self, separator=" "):
78
- return f"{self.value.symbol}{separator}{self.value.name}"
79
-
80
- @staticmethod
81
- def from_str(type):
82
- if "fine-tuned" in type or "🔶" in type:
83
- return ModelType.FT
84
- if "pretrained" in type or "🟢" in type:
85
- return ModelType.PT
86
- if "RL-tuned" in type or "🟦" in type:
87
- return ModelType.RL
88
- if "instruction-tuned" in type or "⭕" in type:
89
- return ModelType.IFT
90
- return ModelType.Unknown
91
-
92
- class WeightType(Enum):
93
- Adapter = ModelDetails("Adapter")
94
- Original = ModelDetails("Original")
95
- Delta = ModelDetails("Delta")
96
-
97
- class Precision(Enum):
98
- float16 = ModelDetails("float16")
99
- bfloat16 = ModelDetails("bfloat16")
100
- Unknown = ModelDetails("?")
101
-
102
- def from_str(precision):
103
- if precision in ["torch.float16", "float16"]:
104
- return Precision.float16
105
- if precision in ["torch.bfloat16", "bfloat16"]:
106
- return Precision.bfloat16
107
- return Precision.Unknown
108
-
109
- # Column selection
110
- COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
111
-
112
- EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
113
- EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]
114
-
115
- BENCHMARK_COLS = [t.value.col_name for t in Tasks]
116
-
 
 
src/envs.py DELETED
@@ -1,25 +0,0 @@
1
- import os
2
-
3
- from huggingface_hub import HfApi
4
-
5
- # Info to change for your repository
6
- # ----------------------------------
7
- TOKEN = os.environ.get("HF_TOKEN") # A read/write token for your org
8
-
9
- OWNER = "demo-leaderboard-backend" # Change to your org - don't forget to create a results and request dataset, with the correct format!
10
- # ----------------------------------
11
-
12
- REPO_ID = f"{OWNER}/leaderboard"
13
- QUEUE_REPO = f"{OWNER}/requests"
14
- RESULTS_REPO = f"{OWNER}/results"
15
-
16
- # If you setup a cache later, just change HF_HOME
17
- CACHE_PATH=os.getenv("HF_HOME", ".")
18
-
19
- # Local caches
20
- EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
21
- EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
22
- EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
23
- EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
24
-
25
- API = HfApi(token=TOKEN)
 
src/leaderboard/read_evals.py DELETED
@@ -1,199 +0,0 @@
1
- import glob
2
- import json
3
- import math
4
- import os
5
- from dataclasses import dataclass
6
-
7
- import dateutil
8
- import numpy as np
9
-
10
- from src.display.formatting import make_clickable_model
11
- from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
12
- from src.submission.check_validity import is_model_on_hub
13
-
14
-
15
- @dataclass
16
- class EvalResult:
17
- """Represents one full evaluation. Built from a combination of the result and request file for a given run.
18
- """
19
- eval_name: str # org_model_precision (uid)
20
- full_model: str # org/model (path on hub)
21
- org: str
22
- model: str
23
- revision: str # commit hash, "" if main
24
- results: dict
25
- precision: Precision = Precision.Unknown
26
- model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
27
- weight_type: WeightType = WeightType.Original # Original or Adapter
28
- architecture: str = "Unknown"
29
- license: str = "?"
30
- likes: int = 0
31
- num_params: int = 0
32
- date: str = "" # submission date of request file
33
- still_on_hub: bool = False
34
-
35
- @classmethod
36
- def init_from_json_file(self, json_filepath):
37
- """Inits the result from the specific model result file"""
38
- with open(json_filepath) as fp:
39
- data = json.load(fp)
40
-
41
- config = data.get("config")
42
-
43
- # Precision
44
- precision = Precision.from_str(config.get("model_dtype"))
45
-
46
- # Get model and org
47
- org_and_model = config.get("model_name", config.get("model_args", None))
48
- org_and_model = org_and_model.split("/", 1)
49
-
50
- if len(org_and_model) == 1:
51
- org = None
52
- model = org_and_model[0]
53
- result_key = f"{model}_{precision.value.name}"
54
- else:
55
- org = org_and_model[0]
56
- model = org_and_model[1]
57
- result_key = f"{org}_{model}_{precision.value.name}"
58
- full_model = "/".join(org_and_model)
59
-
60
- still_on_hub, _, model_config = is_model_on_hub(
61
- full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
62
- )
63
- architecture = "?"
64
- if model_config is not None:
65
- architectures = getattr(model_config, "architectures", None)
66
- if architectures:
67
- architecture = ";".join(architectures)
68
-
69
- # Extract results available in this file (some results are split in several files)
70
- results = {}
71
- for task in Tasks:
72
- task = task.value
73
-
74
- # We average all scores of a given metric (not all metrics are present in all files)
75
- accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark == k])
76
- if accs.size == 0 or any([acc is None for acc in accs]):
77
- continue
78
-
79
- if task.metric == "score":
80
- mean_acc = float(np.mean(accs))
81
- else:
82
- mean_acc = float(np.mean(accs) * 100.0)
83
- results[task.benchmark] = mean_acc
84
-
85
- return self(
86
- eval_name=result_key,
87
- full_model=full_model,
88
- org=org,
89
- model=model,
90
- results=results,
91
- precision=precision,
92
- revision= config.get("model_sha", ""),
93
- still_on_hub=still_on_hub,
94
- architecture=architecture
95
- )
96
-
97
- def update_with_request_file(self, requests_path):
98
- """Finds the relevant request file for the current model and updates info with it"""
99
- request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
100
-
101
- try:
102
- with open(request_file, "r") as f:
103
- request = json.load(f)
104
- self.model_type = ModelType.from_str(request.get("model_type", ""))
105
- self.weight_type = WeightType[request.get("weight_type", "Original")]
106
- self.license = request.get("license", "?")
107
- self.likes = request.get("likes", 0)
108
- self.num_params = request.get("params", 0)
109
- self.date = request.get("submitted_time", "")
110
- except Exception:
111
- print(f"Could not find request file for {self.org}/{self.model} with precision {self.precision.value.name}")
112
-
113
- def to_dict(self):
114
- """Converts the Eval Result to a dict compatible with our dataframe display"""
115
- average = sum([v for v in self.results.values() if v is not None]) / len(Tasks)
116
- data_dict = {
117
- "eval_name": self.eval_name, # not a column, just a save name,
118
- AutoEvalColumn.precision.name: self.precision.value.name,
119
- AutoEvalColumn.model_type.name: self.model_type.value.name,
120
- AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
121
- AutoEvalColumn.weight_type.name: self.weight_type.value.name,
122
- AutoEvalColumn.architecture.name: self.architecture,
123
- AutoEvalColumn.model.name: make_clickable_model(self.full_model),
124
- AutoEvalColumn.revision.name: self.revision,
125
- AutoEvalColumn.average.name: average,
126
- AutoEvalColumn.license.name: self.license,
127
- AutoEvalColumn.likes.name: self.likes,
128
- AutoEvalColumn.params.name: self.num_params,
129
- AutoEvalColumn.still_on_hub.name: self.still_on_hub,
130
- }
131
-
132
- for task in Tasks:
133
- data_dict[task.value.col_name] = self.results[task.value.benchmark]
134
-
135
- return data_dict
136
-
137
-
138
- def get_request_file_for_model(requests_path, model_name, precision):
139
- """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
140
- request_files = os.path.join(
141
- requests_path,
142
- f"{model_name}_eval_request_*.json",
143
- )
144
- request_files = glob.glob(request_files)
145
-
146
- # Select correct request file (precision)
147
- request_file = ""
148
- request_files = sorted(request_files, reverse=True)
149
- for tmp_request_file in request_files:
150
- with open(tmp_request_file, "r") as f:
151
- req_content = json.load(f)
152
- if (
153
- req_content["status"] in ["FINISHED"]
154
- and req_content["precision"] == precision.split(".")[-1]
155
- ):
156
- request_file = tmp_request_file
157
- return request_file
158
-
159
-
160
- def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
161
- """From the path of the results folder root, extract all needed info for results"""
162
- model_result_filepaths = []
163
-
164
- for root, _, files in os.walk(results_path):
165
- # We should only have json files in model results
166
- if len(files) == 0 or any([not f.endswith(".json") for f in files]):
167
- continue
168
-
169
- # Sort the files by date
170
- try:
171
- files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7])
172
- except dateutil.parser._parser.ParserError:
173
- files = [files[-1]]
174
-
175
- for file in files:
176
- model_result_filepaths.append(os.path.join(root, file))
177
-
178
- eval_results = {}
179
- for model_result_filepath in model_result_filepaths:
180
- # Creation of result
181
- eval_result = EvalResult.init_from_json_file(model_result_filepath)
182
- eval_result.update_with_request_file(requests_path)
183
-
184
- # Store results of same eval together
185
- eval_name = eval_result.eval_name
186
- if eval_name in eval_results.keys():
187
- eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
188
- else:
189
- eval_results[eval_name] = eval_result
190
-
191
- results = []
192
- for v in eval_results.values():
193
- try:
194
- v.to_dict() # we test if the dict version is complete
195
- results.append(v)
196
- except KeyError: # not all eval values present
197
- continue
198
-
199
- return results
 
src/populate.py DELETED
@@ -1,102 +0,0 @@
1
- import json
2
- import os
3
-
4
- import pandas as pd
5
-
6
- from src.display.formatting import has_no_nan_values, make_clickable_model
7
- from src.display.utils import AutoEvalColumn, EvalQueueColumn
8
- from src.leaderboard.read_evals import get_raw_eval_results
9
-
10
-
11
- def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
12
- """Creates a dataframe from all the individual experiment results"""
13
- raw_data = get_raw_eval_results(results_path, requests_path)
14
- all_data_json = [v.to_dict() for v in raw_data]
15
-
16
- df = pd.DataFrame.from_records(all_data_json)
17
-
18
- # ------------------------------------------------------------------
19
- # Fallback: if no evaluation results are found we populate the
20
- # leaderboard with a single example model. This guarantees that a
21
- # freshly deployed Space shows a non-empty leaderboard and it serves
22
- # as a template for the expected columns/values.
23
- # ------------------------------------------------------------------
24
- if df.empty:
25
- example_row = {}
26
-
27
- # Populate benchmark metrics with the default value 0.5 using internal column names
28
- for metric in benchmark_cols:
29
- example_row[metric] = 0.5
30
-
31
- # Minimal metadata so that the row displays nicely
32
- example_row[AutoEvalColumn.model.name] = make_clickable_model("example/model")
33
- example_row[AutoEvalColumn.average.name] = 0.5
34
- example_row[AutoEvalColumn.model_type_symbol.name] = "🟢"
35
- example_row[AutoEvalColumn.model_type.name] = "pretrained"
36
- example_row[AutoEvalColumn.precision.name] = "float16"
37
- example_row[AutoEvalColumn.weight_type.name] = "Original"
38
- example_row[AutoEvalColumn.still_on_hub.name] = True
39
- example_row[AutoEvalColumn.architecture.name] = "Transformer"
40
- example_row[AutoEvalColumn.revision.name] = "main"
41
- example_row[AutoEvalColumn.license.name] = "apache-2.0"
42
-
43
- # Any missing columns will be created later in the function
44
- df = pd.DataFrame([example_row])
45
-
46
- # Sort primarily by LLM exact-match Pass@1 metric; if not present, fall back to average
47
- preferred_cols = []
48
- if hasattr(AutoEvalColumn, "pass_at_1"):
49
- preferred_cols.append(AutoEvalColumn.pass_at_1.name)
50
- preferred_cols.append(AutoEvalColumn.average.name)
51
-
52
- for col in preferred_cols:
53
- if col in df.columns:
54
- df = df.sort_values(by=[col], ascending=False)
55
- break
56
-
57
- # Ensure all expected columns exist, add missing ones with NaN so selection does not fail
58
- for expected in cols:
59
- if expected not in df.columns:
60
- df[expected] = pd.NA
61
-
62
- df = df[cols].round(decimals=2)
63
-
64
- # filter out if any of the benchmarks have not been produced
65
- df = df[has_no_nan_values(df, benchmark_cols)]
66
- return df
67
-
68
-
69
- def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
70
- """Creates the different dataframes for the evaluation queues requestes"""
71
- entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
72
- all_evals = []
73
-
74
- for entry in entries:
75
- if ".json" in entry:
76
- file_path = os.path.join(save_path, entry)
77
- with open(file_path) as fp:
78
- data = json.load(fp)
79
-
80
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
81
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
82
-
83
- all_evals.append(data)
84
- elif ".md" not in entry:
85
- # this is a folder
86
- sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(e) and not e.startswith(".")]
87
- for sub_entry in sub_entries:
88
- file_path = os.path.join(save_path, entry, sub_entry)
89
- with open(file_path) as fp:
90
- data = json.load(fp)
91
-
92
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
93
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
94
- all_evals.append(data)
95
-
96
- pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
97
- running_list = [e for e in all_evals if e["status"] == "RUNNING"]
98
- finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
99
- df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
100
- df_running = pd.DataFrame.from_records(running_list, columns=cols)
101
- df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
102
- return df_finished[cols], df_running[cols], df_pending[cols]
 
src/submission/check_validity.py DELETED
@@ -1,99 +0,0 @@
1
- import json
2
- import os
3
- import re
4
- from collections import defaultdict
5
- from datetime import datetime, timedelta, timezone
6
-
7
- import huggingface_hub
8
- from huggingface_hub import ModelCard
9
- from huggingface_hub.hf_api import ModelInfo
10
- from transformers import AutoConfig
11
- from transformers.models.auto.tokenization_auto import AutoTokenizer
12
-
13
- def check_model_card(repo_id: str) -> tuple[bool, str]:
14
- """Checks if the model card and license exist and have been filled"""
15
- try:
16
- card = ModelCard.load(repo_id)
17
- except huggingface_hub.utils.EntryNotFoundError:
18
- return False, "Please add a model card to your model to explain how you trained/fine-tuned it."
19
-
20
- # Enforce license metadata
21
- if card.data.license is None:
22
- if not ("license_name" in card.data and "license_link" in card.data):
23
- return False, (
24
- "License not found. Please add a license to your model card using the `license` metadata or a"
25
- " `license_name`/`license_link` pair."
26
- )
27
-
28
- # Enforce card content
29
- if len(card.text) < 200:
30
- return False, "Please add a description to your model card, it is too short."
31
-
32
- return True, ""
33
-
34
- def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
35
- """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
36
- try:
37
- config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
38
- if test_tokenizer:
39
- try:
40
- tk = AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
41
- except ValueError as e:
42
- return (
43
- False,
44
- f"uses a tokenizer which is not in a transformers release: {e}",
45
- None
46
- )
47
- except Exception as e:
48
- return (False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None)
49
- return True, None, config
50
-
51
- except ValueError:
52
- return (
53
- False,
54
- "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
55
- None
56
- )
57
-
58
- except Exception as e:
59
- return False, "was not found on hub!", None
60
-
61
-
62
- def get_model_size(model_info: ModelInfo, precision: str):
63
- """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
64
- try:
65
- model_size = round(model_info.safetensors["total"] / 1e9, 3)
66
- except (AttributeError, TypeError):
67
- return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
68
-
69
- size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
70
- model_size = size_factor * model_size
71
- return model_size
72
-
73
- def get_model_arch(model_info: ModelInfo):
74
- """Gets the model architecture from the configuration"""
75
- return model_info.config.get("architectures", "Unknown")
76
-
77
- def already_submitted_models(requested_models_dir: str) -> set[str]:
78
- """Gather a list of already submitted models to avoid duplicates"""
79
- depth = 1
80
- file_names = []
81
- users_to_submission_dates = defaultdict(list)
82
-
83
- for root, _, files in os.walk(requested_models_dir):
84
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
85
- if current_depth == depth:
86
- for file in files:
87
- if not file.endswith(".json"):
88
- continue
89
- with open(os.path.join(root, file), "r") as f:
90
- info = json.load(f)
91
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
92
-
93
- # Select organisation
94
- if info["model"].count("/") == 0 or "submitted_time" not in info:
95
- continue
96
- organisation, _ = info["model"].split("/")
97
- users_to_submission_dates[organisation].append(info["submitted_time"])
98
-
99
- return set(file_names), users_to_submission_dates
 
src/submission/submit.py DELETED
@@ -1,248 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime, timezone
4
-
5
- from src.display.formatting import styled_error, styled_message, styled_warning
6
- from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO, EVAL_RESULTS_PATH, RESULTS_REPO
7
- from src.submission.check_validity import (
8
- already_submitted_models,
9
- check_model_card,
10
- get_model_size,
11
- is_model_on_hub,
12
- )
13
-
14
- REQUESTED_MODELS = None
15
- USERS_TO_SUBMISSION_DATES = None
16
-
17
- def add_new_eval(
18
- model: str,
19
- base_model: str,
20
- revision: str,
21
- precision: str,
22
- weight_type: str,
23
- model_type: str,
24
- ):
25
- global REQUESTED_MODELS
26
- global USERS_TO_SUBMISSION_DATES
27
- if not REQUESTED_MODELS:
28
- REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
29
-
30
- user_name = ""
31
- model_path = model
32
- if "/" in model:
33
- user_name = model.split("/")[0]
34
- model_path = model.split("/")[1]
35
-
36
- precision = precision.split(" ")[0]
37
- current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
-
39
- if model_type is None or model_type == "":
40
- return styled_error("Please select a model type.")
41
-
42
- # Does the model actually exist?
43
- if revision == "":
44
- revision = "main"
45
-
46
- # Is the model on the hub?
47
- if weight_type in ["Delta", "Adapter"]:
48
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
- if not base_model_on_hub:
50
- return styled_error(f'Base model "{base_model}" {error}')
51
-
52
- if not weight_type == "Adapter":
53
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
- if not model_on_hub:
55
- return styled_error(f'Model "{model}" {error}')
56
-
57
- # Is the model info correctly filled?
58
- try:
59
- model_info = API.model_info(repo_id=model, revision=revision)
60
- except Exception:
61
- return styled_error("Could not get your model information. Please fill it up properly.")
62
-
63
- model_size = get_model_size(model_info=model_info, precision=precision)
64
-
65
- # Were the model card and license filled?
66
- try:
67
- license = model_info.cardData["license"]
68
- except Exception:
69
- return styled_error("Please select a license for your model")
70
-
71
- modelcard_OK, error_msg = check_model_card(model)
72
- if not modelcard_OK:
73
- return styled_error(error_msg)
74
-
75
- # Seems good, creating the eval
76
- print("Adding new eval")
77
-
78
- eval_entry = {
79
- "model": model,
80
- "base_model": base_model,
81
- "revision": revision,
82
- "precision": precision,
83
- "weight_type": weight_type,
84
- "status": "PENDING",
85
- "submitted_time": current_time,
86
- "model_type": model_type,
87
- "likes": model_info.likes,
88
- "params": model_size,
89
- "license": license,
90
- "private": False,
91
- }
92
-
93
- # Check for duplicate submission
94
- if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
95
- return styled_warning("This model has been already submitted.")
96
-
97
- print("Creating eval file")
98
- OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
- os.makedirs(OUT_DIR, exist_ok=True)
100
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
-
102
- with open(out_path, "w") as f:
103
- f.write(json.dumps(eval_entry))
104
-
105
- print("Uploading eval file")
106
- API.upload_file(
107
- path_or_fileobj=out_path,
108
- path_in_repo=out_path.split("eval-queue/")[1],
109
- repo_id=QUEUE_REPO,
110
- repo_type="dataset",
111
- commit_message=f"Add {model} to eval queue",
112
- )
113
-
114
- # Remove the local file
115
- os.remove(out_path)
116
-
117
- return styled_message(
118
- "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
119
- )
120
-
121
- # --------------------------------------------------------
122
- # Manual metrics submission (bypass evaluation queue)
123
- # --------------------------------------------------------
124
-
125
- ALL_SUBJECTIVE_FIELDS = [
126
- "readability",
127
- "relevance",
128
- "explanation_clarity",
129
- "problem_identification",
130
- "actionability",
131
- "completeness",
132
- "specificity",
133
- "contextual_adequacy",
134
- "consistency",
135
- "brevity",
136
- ]
137
-
138
- def _compute_multimetric(payload: dict) -> float:
139
- """Average of the 10 subjective metrics."""
140
- total = sum(float(payload[f]) for f in ALL_SUBJECTIVE_FIELDS)
141
- return total / len(ALL_SUBJECTIVE_FIELDS)
142
-
143
- def add_manual_results(
144
- model: str,
145
- revision: str,
146
- bleu: float,
147
- readability: int,
148
- relevance: int,
149
- explanation_clarity: int,
150
- problem_identification: int,
151
- actionability: int,
152
- completeness: int,
153
- specificity: int,
154
- contextual_adequacy: int,
155
- consistency: int,
156
- brevity: int,
157
- pass_at_1: float,
158
- pass_at_5: float,
159
- pass_at_10: float,
160
- ):
161
- """Directly submit evaluation metrics for a model and push them to the results dataset."""
162
-
163
- # Basic validation
164
- if model == "":
165
- return styled_error("Please specify a model name.")
166
-
167
- if revision == "":
168
- revision = "main"
169
-
170
- if pass_at_5 < pass_at_1:
171
- return styled_error("pass@5 must be greater or equal to pass@1")
172
- if pass_at_10 < pass_at_5:
173
- return styled_error("pass@10 must be greater or equal to pass@5")
174
-
175
- # Prepare dictionary in the same format used by read_evals.py
176
- payload_dict = {
177
- "model": model,
178
- "revision": revision,
179
- "bleu": bleu,
180
- "readability": readability,
181
- "relevance": relevance,
182
- "explanation_clarity": explanation_clarity,
183
- "problem_identification": problem_identification,
184
- "actionability": actionability,
185
- "completeness": completeness,
186
- "specificity": specificity,
187
- "contextual_adequacy": contextual_adequacy,
188
- "consistency": consistency,
189
- "brevity": brevity,
190
- "pass_at_1": pass_at_1,
191
- "pass_at_5": pass_at_5,
192
- "pass_at_10": pass_at_10,
193
- }
194
-
195
- multimetric = _compute_multimetric(payload_dict)
196
-
197
- # Compose final results file (same structure as api_submit_results)
198
- result_json = {
199
- "config": {
200
- "model_dtype": "unknown",
201
- "model_name": model,
202
- "model_sha": revision,
203
- },
204
- "results": {
205
- "bleu": {"score": bleu},
206
- "multimetric": {"score": multimetric},
207
- "pass_at_1": {"score": pass_at_1},
208
- "pass_at_5": {"score": pass_at_5},
209
- "pass_at_10": {"score": pass_at_10},
210
- },
211
- }
212
-
213
- # Add subjective metrics
214
- for field in ALL_SUBJECTIVE_FIELDS:
215
- result_json["results"][field] = {"score": payload_dict[field]}
216
-
217
- # Write file locally then upload
218
- try:
219
- os.makedirs(EVAL_RESULTS_PATH, exist_ok=True)
220
- except Exception:
221
- pass
222
-
223
- from datetime import datetime, timezone
224
- import uuid
225
-
226
- ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
227
- unique_id = uuid.uuid4().hex[:8]
228
- filename = f"results_{model.replace('/', '_')}_{ts}_{unique_id}.json"
229
- local_path = os.path.join(EVAL_RESULTS_PATH, filename)
230
-
231
- try:
232
- with open(local_path, "w") as fp:
233
- json.dump(result_json, fp)
234
-
235
- API.upload_file(
236
- path_or_fileobj=local_path,
237
- path_in_repo=filename,
238
- repo_id=RESULTS_REPO,
239
- repo_type="dataset",
240
- commit_message=f"Add manual results for {model}",
241
- )
242
- except Exception as e:
243
- return styled_error(f"Failed to upload results: {e}")
244
- finally:
245
- if os.path.exists(local_path):
246
- os.remove(local_path)
247
-
248
- return styled_message("Metrics successfully submitted! The leaderboard will refresh shortly.")