Alex committed on
Commit 5cfc290 · 1 parent: 4ae71b6
Files changed (1)
  app.py +33 -75
app.py CHANGED
@@ -4,7 +4,6 @@ from typing import List, Dict
 
 import gradio as gr
 from pydantic import BaseModel, Field, field_validator
-from fastapi import FastAPI, HTTPException
 
 # --------------- Configuration ---------------
 LEADERBOARD_PATH = Path("leaderboard_data.json")
@@ -187,79 +186,38 @@ with gr.Blocks(title="Custom LLM Leaderboard") as demo:
             brevity_inp,
         ],
         outputs=[leaderboard_df, status_markdown],
+        api_name="submit_model",
     )
 
-# ----------------- FastAPI backend -----------------
-
-backend = FastAPI(title="LLM Leaderboard API")
-
-
-class APISubmission(BaseModel):
-    model_name: str
-    bleu: float
-    llm_pass_1: float
-    llm_pass_5: float
-    llm_pass_10: float
-    readability: float
-    relevance: float
-    explanation_clarity: float
-    problem_identification: float
-    actionability: float
-    completeness: float
-    specificity: float
-    contextual_adequacy: float
-    consistency: float
-    brevity: float
-
-
-@backend.post("/submit")
-def api_submit(payload: APISubmission):
-    """Submit results via raw HTTP POST (JSON)."""
-    try:
-        LeaderboardEntry(
-            model_name=payload.model_name.strip(),
-            bleu=payload.bleu,
-            llm_pass_1=payload.llm_pass_1,
-            llm_pass_5=payload.llm_pass_5,
-            llm_pass_10=payload.llm_pass_10,
-            metrics={
-                "readability": payload.readability,
-                "relevance": payload.relevance,
-                "explanation_clarity": payload.explanation_clarity,
-                "problem_identification": payload.problem_identification,
-                "actionability": payload.actionability,
-                "completeness": payload.completeness,
-                "specificity": payload.specificity,
-                "contextual_adequacy": payload.contextual_adequacy,
-                "consistency": payload.consistency,
-                "brevity": payload.brevity,
-            },
-        )
-    except Exception as e:
-        raise HTTPException(status_code=400, detail=str(e))
-
-    # If valid, reuse same logic
-    submit_model(
-        payload.model_name,
-        payload.bleu,
-        payload.llm_pass_1,
-        payload.llm_pass_5,
-        payload.llm_pass_10,
-        payload.readability,
-        payload.relevance,
-        payload.explanation_clarity,
-        payload.problem_identification,
-        payload.actionability,
-        payload.completeness,
-        payload.specificity,
-        payload.contextual_adequacy,
-        payload.consistency,
-        payload.brevity,
-    )
-
-    return {"status": "ok", "message": "Submission stored"}
-
-
-# ----------------- Mount Gradio -----------------
-
-app = gr.mount_gradio_app(backend, demo, path="/")
+    # Ensure API endpoint for programmatic submissions
+    submit_btn.click(
+        fn=submit_model,
+        inputs=[
+            model_name_inp,
+            bleu_inp,
+            pass1_inp,
+            pass5_inp,
+            pass10_inp,
+            readability_inp,
+            relevance_inp,
+            explanation_inp,
+            problem_inp,
+            actionability_inp,
+            completeness_inp,
+            specificity_inp,
+            contextual_inp,
+            consistency_inp,
+            brevity_inp,
+        ],
+        outputs=[leaderboard_df, status_markdown],
+        api_name="submit_model",
+    )
+
+
+# ----------------- Launch -----------------
+
+if __name__ == "__main__":
+    demo.queue().launch()
+
+# For HF Spaces runtime (gradio SDK) expose `demo`
+app = demo
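
With the FastAPI layer removed, programmatic submissions go through the Gradio endpoint exposed by api_name="submit_model" instead of the old POST /submit route. A minimal client sketch, assuming a hypothetical Space ID user/llm-leaderboard and illustrative scores; the endpoint takes the fifteen inputs in the order wired above and returns the refreshed leaderboard plus a status message:

# Sketch only: submit scores through the new "submit_model" API endpoint.
# The Space ID and all score values below are placeholders, not from this commit.
from gradio_client import Client

client = Client("user/llm-leaderboard")  # hypothetical Space ID

rows, status = client.predict(
    "my-model-7b",            # model_name
    32.5,                     # bleu
    41.0, 58.0, 66.0,         # llm_pass_1, llm_pass_5, llm_pass_10
    7.8, 8.1, 7.5, 7.2, 6.9,  # readability, relevance, explanation_clarity,
                              # problem_identification, actionability
    7.0, 6.5, 7.3, 7.9, 8.4,  # completeness, specificity, contextual_adequacy,
                              # consistency, brevity
    api_name="/submit_model",
)
print(status)  # confirmation message rendered in status_markdown

One caveat: the diff wires api_name="submit_model" to two listeners, and Gradio typically warns about the duplicate name and auto-suffixes the later registration (e.g. submit_model_1), so clients end up targeting the first one.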