from dataclasses import dataclass, make_dataclass
from enum import Enum
import pandas as pd
from src.about import Tasks
def fields(raw_class):
    # Return the class-level attribute values (the ColumnContent defaults), skipping dunders.
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
# These classes hold the user-facing column names, so a rename only needs
# to happen here rather than throughout the code.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
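# Illustrative sketch only (this column is an assumption, not part of this leaderboard):
# a column that exists in the data but stays out of the table by default could be
# declared as ColumnContent("Hub License", "str", False, hidden=True).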
## Leaderboard columns
auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average", "number", True)])
for task in Tasks:
    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# We use make_dataclass to build the class dynamically, adding one score column per task in Tasks
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
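# Roughly, the generated class is equivalent to the hand-written version below
# (sketch only; "task0"/"ANLI" are placeholder names, assuming Tasks defines such an entry):
#
#   @dataclass(frozen=True)
#   class AutoEvalColumn:
#       model: ColumnContent = ColumnContent("Model", "markdown", True, never_hidden=True)
#       average: ColumnContent = ColumnContent("Average", "number", True)
#       task0: ColumnContent = ColumnContent("ANLI", "number", True)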
## All the model information that we might need
@dataclass
class ModelDetails:
    name: str
    display_name: str = ""
    symbol: str = ""  # emoji
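# Illustrative only (the values below are made up):
#   ModelDetails(name="org/model-7b", display_name="Model 7B", symbol="🔶")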
# Column selection
COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
BENCHMARK_COLS = [t.value.col_name for t in Tasks]
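# Minimal usage sketch (illustrative, not part of the original file): build an empty
# results table with the leaderboard columns and keep only those shown by default.
if __name__ == "__main__":
    demo_df = pd.DataFrame(columns=COLS)
    shown_cols = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
    print(demo_df[shown_cols])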