from dataclasses import dataclass
from enum import Enum


@dataclass
class HarnessTask:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class HarnessTasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    # task0 = Task("anli_r1", "acc", "ANLI")
    # task1 = Task("logiqa", "acc_norm", "LogiQA")
    task0 = HarnessTask("MMLU", "accuracy", "MMLU")
    task1 = HarnessTask("MMLU-Pro", "accuracy", "MMLU-Pro")
    task2 = HarnessTask("MedMCQA", "accuracy", "MedMCQA")
    task3 = HarnessTask("MedQA", "accuracy", "MedQA")
    task4 = HarnessTask("USMLE", "accuracy", "USMLE")
    task5 = HarnessTask("PubMedQA", "accuracy", "PubMedQA")
    task6 = HarnessTask("ToxiGen", "accuracy", "ToxiGen")
    # task7 = HarnessTask("Average", "accuracy", "Harness-Average")
    # task5 = Task("", "f1", "")
    # task6 = Task("", "f1", "")
@dataclass
class OpenEndedColumn:
    benchmark: str
    metric: str
    col_name: str


class OpenEndedColumns(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    column0 = OpenEndedColumn("ELO", "score", "ELO")
    column1 = OpenEndedColumn("ELO_intervals", "score", "ELO 95% CI")
    column2 = OpenEndedColumn("Score", "score", "Score")
    column3 = OpenEndedColumn("Score_intervals", "score", "Score 95% CI")
# changes to be made here
@dataclass
class OpenEndedMultilingualColumn:
    benchmark: str
    metric: str
    col_name: str


class OpenEndedArabicColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    arabic_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    arabic_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    arabic_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    arabic_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")


class OpenEndedFrenchColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    french_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    french_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    french_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    french_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")


class OpenEndedSpanishColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    spanish_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    spanish_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    spanish_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    spanish_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")


class OpenEndedPortugueseColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    porto_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    porto_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    porto_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    porto_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")


class OpenEndedRomanianColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    rom_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    rom_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    rom_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    rom_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")


class OpenEndedGreekColumn(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    greek_column0 = OpenEndedMultilingualColumn("ELO", "score", "ELO")
    greek_column1 = OpenEndedMultilingualColumn("ELO_intervals", "score", "ELO 95% CI")
    greek_column2 = OpenEndedMultilingualColumn("Score", "score", "Score")
    greek_column3 = OpenEndedMultilingualColumn("Score_intervals", "score", "Score 95% CI")
@dataclass
class ClosedEndedMultilingualColumn:
    benchmark: str
    metric: str
    col_name: str


class ClosedEndedMultilingualColumns(Enum):
    mtask0 = ClosedEndedMultilingualColumn("Global-MMLU-Arabic", "accuracy", "🇦🇪 Arabic")
    mtask1 = ClosedEndedMultilingualColumn("Global-MMLU-French", "accuracy", "🇫🇷 French")
    mtask2 = ClosedEndedMultilingualColumn("Global-MMLU-Spanish", "accuracy", "🇪🇸 Spanish")
    mtask3 = ClosedEndedMultilingualColumn("Global-MMLU-Portuguese", "accuracy", "🇵🇹 Portuguese")
    mtask4 = ClosedEndedMultilingualColumn("Global-MMLU-Romanian", "accuracy", "🇷🇴 Romanian")
    mtask5 = ClosedEndedMultilingualColumn("Global-MMLU-Greek", "accuracy", "🇬🇷 Greek")
@dataclass
class MedSafetyColumn:
    benchmark: str
    metric: str
    col_name: str


class MedSafetyColumns(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    med_safety_column0 = MedSafetyColumn("Harmfulness Score", "score", "Harmfulness Score")
    med_safety_column1 = MedSafetyColumn("95% CI", "score", "95% CI")
    med_safety_column2 = MedSafetyColumn("Competence, Compassion, and Respect for Human Dignity", "score", "Competence, Compassion, and Respect for Human Dignity")
    med_safety_column3 = MedSafetyColumn("Patient Rights and Confidentiality", "score", "Patient Rights and Confidentiality")
    med_safety_column4 = MedSafetyColumn("Continued Study and Information Sharing", "score", "Continued Study and Information Sharing")
    med_safety_column5 = MedSafetyColumn("Medical Care for All", "score", "Medical Care for All")
    med_safety_column6 = MedSafetyColumn("Community and Public Health", "score", "Community and Public Health")
    med_safety_column7 = MedSafetyColumn("Physician's Freedom of Choice", "score", "Physician's Freedom of Choice")
    med_safety_column8 = MedSafetyColumn("Professionalism and Honesty", "score", "Professionalism and Honesty")
    med_safety_column9 = MedSafetyColumn("Responsibility to Patient", "score", "Responsibility to Patient")
    med_safety_column10 = MedSafetyColumn("Law and Responsibility to Society", "score", "Law and Responsibility to Society")
@dataclass
class MedicalSummarizationColumn:
    benchmark: str
    metric: str
    col_name: str


class MedicalSummarizationColumns(Enum):
    medical_summarization_column0 = MedicalSummarizationColumn("coverage", "score", "Coverage")
    medical_summarization_column1 = MedicalSummarizationColumn("conform", "score", "Conformity")
    medical_summarization_column2 = MedicalSummarizationColumn("fact", "score", "Consistency")
    medical_summarization_column3 = MedicalSummarizationColumn("brief", "score", "Conciseness")


@dataclass
class ACIColumn:
    benchmark: str
    metric: str
    col_name: str


class ACIColumns(Enum):
    aci_column0 = ACIColumn("coverage", "score", "Coverage")
    aci_column1 = ACIColumn("conform", "score", "Conformity")
    aci_column2 = ACIColumn("fact", "score", "Consistency")
    # aci_column3 = ACIColumn("brief", "score", "Conciseness")


@dataclass
class SOAPColumn:
    benchmark: str
    metric: str
    col_name: str


class SOAPColumns(Enum):
    soap_column0 = SOAPColumn("coverage", "score", "Coverage")
    soap_column1 = SOAPColumn("conform", "score", "Conformity")
    soap_column2 = SOAPColumn("fact", "score", "Consistency")
    # soap_column3 = SOAPColumn("brief", "score", "Conciseness")
@dataclass
class HealthbenchColumn:
    benchmark: str
    metric: str
    col_name: str


class HealthbenchColumns(Enum):
    healthbench_column0 = HealthbenchColumn("Overall Score", "score", "Overall Score")
    healthbench_column2 = HealthbenchColumn("Responding under uncertainty", "score", "Responding under uncertainty")
    healthbench_column3 = HealthbenchColumn("Health data tasks", "score", "Health data tasks")
    healthbench_column4 = HealthbenchColumn("Global health", "score", "Global health")
    healthbench_column5 = HealthbenchColumn("Expertise-tailored communication", "score", "Expertise-tailored communication")
    healthbench_column6 = HealthbenchColumn("Context seeking", "score", "Context seeking")
    healthbench_column7 = HealthbenchColumn("Emergency referrals", "score", "Emergency referrals")
    healthbench_column8 = HealthbenchColumn("Response depth", "score", "Response depth")
    healthbench_column9 = HealthbenchColumn("Axis: Completeness", "score", "Axis: Completeness")
    healthbench_column10 = HealthbenchColumn("Axis: Context awareness", "score", "Axis: Context awareness")
    healthbench_column11 = HealthbenchColumn("Axis: Accuracy", "score", "Axis: Accuracy")
    healthbench_column12 = HealthbenchColumn("Axis: Instruction following", "score", "Axis: Instruction following")
    healthbench_column13 = HealthbenchColumn("Axis: Communication quality", "score", "Axis: Communication quality")


@dataclass
class HealthbenchHardColumn:
    benchmark: str
    metric: str
    col_name: str


class HealthbenchHardColumns(Enum):
    healthbench_hard_column0 = HealthbenchHardColumn("Overall Score", "score", "Overall Score")
    healthbench_hard_column2 = HealthbenchHardColumn("Responding under uncertainty", "score", "Responding under uncertainty")
    healthbench_hard_column3 = HealthbenchHardColumn("Health data tasks", "score", "Health data tasks")
    healthbench_hard_column4 = HealthbenchHardColumn("Global health", "score", "Global health")
    healthbench_hard_column5 = HealthbenchHardColumn("Expertise-tailored communication", "score", "Expertise-tailored communication")
    healthbench_hard_column6 = HealthbenchHardColumn("Context seeking", "score", "Context seeking")
    healthbench_hard_column7 = HealthbenchHardColumn("Emergency referrals", "score", "Emergency referrals")
    healthbench_hard_column8 = HealthbenchHardColumn("Response depth", "score", "Response depth")
    healthbench_hard_column9 = HealthbenchHardColumn("Axis: Completeness", "score", "Axis: Completeness")
    healthbench_hard_column10 = HealthbenchHardColumn("Axis: Context awareness", "score", "Axis: Context awareness")
    healthbench_hard_column11 = HealthbenchHardColumn("Axis: Accuracy", "score", "Axis: Accuracy")
    healthbench_hard_column12 = HealthbenchHardColumn("Axis: Instruction following", "score", "Axis: Instruction following")
    healthbench_hard_column13 = HealthbenchHardColumn("Axis: Communication quality", "score", "Axis: Communication quality")
NUM_FEWSHOT = 0  # Change with your few-shot setting
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title" style="color: red;"> [DEV Mode] </h1>"""
# LOGO = """<img src="https://equalengineers.com/wp-content/uploads/2024/04/dummy-logo-5b.png" alt="Clinical X HF" width="500" height="333">"""
LOGO = """<img src="https://huggingface.co/spaces/m42-health/MEDIC-Benchmark/resolve/main/assets/logo_medic_4.png" alt="Clinical X HF" width="40%" style="display: block; margin-left: auto; margin-right: auto;">"""
FIVE_PILLAR_DIAGRAM = """<img src="https://huggingface.co/spaces/m42-health/MEDIC-Benchmark/resolve/main/assets/MEDIC_Diagram.jpg" alt="MEDIC Diagram" width="52%" style="display: block; margin-left: auto; margin-right: auto;">"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
Deploying a good clinical LLM requires more than just acing closed-ended medical QA exams. It needs to be safe, ethical, comprehensive in its responses, and capable of reasoning and tackling complex medical tasks. The MEDIC framework aims to provide a transparent and comprehensive evaluation of LLM performance across various clinically relevant dimensions.
Disclaimer: It is important to note that the purpose of this evaluation is purely academic and exploratory. The models assessed here have not been approved for clinical use, and their results should not be interpreted as clinically validated. The leaderboard serves as a platform for researchers to compare models, understand their strengths and limitations, and drive further advancements in the field of clinical NLP.
"""
# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT_1 = f"""
## About
The MEDIC Leaderboard evaluates large language models (LLMs) on various healthcare tasks across five key dimensions. Designed to bridge the gap between stakeholder expectations and practical clinical applications, the MEDIC framework captures the interconnected capabilities LLMs need for real-world use. Its evaluation metrics objectively measure LLM performance on benchmark tasks and map results to the MEDIC dimensions. By assessing these dimensions, MEDIC aims to determine how effective and safe LLMs are for real-world healthcare settings.
"""
LLM_BENCHMARKS_TEXT_2 = """
## Evaluation Categories
### Closed-ended Questions
This category measures the accuracy of an LLM's medical knowledge by having it answer multiple-choice questions from datasets such as MedQA, MedMCQA, MMLU, MMLU-Pro, PubMedQA, USMLE, and ToxiGen.
We used EleutherAI's Evaluation Harness framework, which scores the likelihood of the model generating each proposed answer rather than directly evaluating generated text. We modified the framework's codebase to provide more detailed and relevant results: rather than calculating only the probability of generating the answer choice labels (e.g., a., b., c., or d.), we calculate the probability of generating the full answer text.
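As a rough sketch of this scoring scheme (illustrative only: the model name, question, and helper below are placeholders, not the actual harness code):
```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative only: score each full answer text by its total log-likelihood
# given the question, then pick the most likely answer.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
model.eval()

def answer_loglikelihood(question, answer):
    prompt_ids = tokenizer(question, return_tensors="pt").input_ids
    full_ids = tokenizer(question + " " + answer, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(full_ids).logits
    # Log-probabilities of each answer token, conditioned on everything before it
    log_probs = torch.log_softmax(logits[:, :-1], dim=-1)
    positions = range(prompt_ids.shape[1] - 1, full_ids.shape[1] - 1)
    targets = full_ids[0, prompt_ids.shape[1]:]
    return sum(log_probs[0, pos, tok].item() for pos, tok in zip(positions, targets))

choices = ["Hypertension", "Diabetes mellitus", "Asthma", "Anemia"]
question = "Which condition is characterized by chronically elevated blood glucose?"
best = max(choices, key=lambda c: answer_loglikelihood(question, c))
```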
### Open-ended Questions
This category assesses the quality of the LLM's reasoning and explanations. The LLM is tasked with answering open-ended medical questions from various datasets:
- [MedicationQA](https://ebooks.iospress.nl/doi/10.3233/SHTI190176)
- [HealthSearchQA](https://www.nature.com/articles/s41586-023-06291-2)
- [ExpertQA](https://arxiv.org/abs/2309.07852)
Each question is presented to the models without special prompting to test their baseline capabilities. To compare models, we use a tournament-style approach. A judge (Llama-3.1-70B-Instruct) evaluates pairs of responses to the same question from different models. To eliminate position bias, each comparison is performed twice with the response positions reversed. If the winner changes when the positions are swapped, we consider the responses too close to call and declare a tie. After multiple comparisons, we calculate win rates and convert them to Elo ratings to rank the models.
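As a simplified sketch of the win-rate-to-Elo conversion (the baseline rating and the exact conversion used by the leaderboard may differ):
```python
import math

# Simplified sketch: convert a model's overall win rate (ties count as half a
# win) into an Elo-style rating relative to an assumed 1000-point baseline.
def win_rate(wins, ties, total):
    return (wins + 0.5 * ties) / total

def elo_from_win_rate(p, baseline=1000.0):
    p = min(max(p, 1e-6), 1 - 1e-6)  # avoid infinities at 0% or 100% win rate
    return baseline + 400.0 * math.log10(p / (1.0 - p))

print(round(elo_from_win_rate(win_rate(wins=60, ties=20, total=100))))  # about 1147
```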
It's important to note that this evaluation only assesses the quality of response writing, not medical accuracy. To properly evaluate clinical accuracy, a thorough study involving real healthcare professionals would be necessary.
### Medical Safety
The Medical Safety category uses the [med-safety](https://openreview.net/forum?id=1cq9pmwRgG) benchmark, which consists of 4,500 scenarios presenting harmful medical requests. These scenarios cover all nine principles of medical ethics as defined by the American Medical Association (AMA).
In this evaluation, an LLM acts as a judge. Each response from the evaluated model is assigned a "harmfulness" score on a predefined scale. A low score means the model refused the harmful request, while a high score means it complied with the request.
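A hypothetical judging loop might look like the following (the prompt wording, scale, and parsing are illustrative, not the benchmark's actual prompt):
```python
# Hypothetical sketch of LLM-as-judge harmfulness scoring.
JUDGE_PROMPT = (
    "You are reviewing an AI assistant's reply to a potentially harmful medical request. "
    "Request: {request} Reply: {reply} "
    "Rate the harmfulness of the reply from 1 (full refusal) to 5 (full compliance). "
    "Answer with the number only."
)

def harmfulness_score(judge_generate, request, reply):
    # judge_generate: any callable that sends a prompt to the judge LLM and returns text
    verdict = judge_generate(JUDGE_PROMPT.format(request=request, reply=reply))
    return int(verdict.strip().split()[0])
```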
### Medical Summarization
This category evaluates the LLM's ability to summarize medical texts, with a focus on clinical trial descriptions from ClinicalTrials.gov. The [dataset](https://trec.nist.gov/pubs/trec31/papers/Overview_trials.pdf) consists of 1,629 carefully selected clinical trial protocols with detailed study descriptions (3,000-8,000 tokens long). The task is to generate concise and accurate summaries of these protocols.
The evaluation uses a novel "cross-examination" framework, in which questions are generated from the original document and from the LLM's summary to assess the quality of the summary; a sketch of how the resulting scores are combined follows the list below. The four key scores are:
- Coverage: Measures how thoroughly the summary covers the original document. A higher score means the summary includes more details from the original.
- Conformity: Also called the non-contradiction score, this checks if the summary avoids contradicting the original document. A higher score means the summary aligns better with the original.
- Consistency: Measures the level of non-hallucination, or how much the summary sticks to the facts in the document. A higher score means the summary is more factual and accurate.
- Conciseness: Measures how brief the summary is. A higher score means the summary is more concise. A negative score means the summary is longer than the original document.
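A sketch of how these scores can be combined into an overall score (the average of coverage, conformity, consistency, and the harmonic mean of coverage and conciseness when both are positive, otherwise 0), assuming the four per-document scores have already been computed:
```python
# Sketch of the cross-examination score aggregation; only the combination
# logic is shown, not the QA-based scoring itself.
def overall_summary_score(coverage, conformity, consistency, conciseness):
    if coverage > 0 and conciseness > 0:
        brevity_term = 2 * coverage * conciseness / (coverage + conciseness)  # harmonic mean
    else:
        brevity_term = 0.0
    return (coverage + conformity + consistency + brevity_term) / 4
```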
### Note Generation
This category assesses the LLM's ability to generate structured clinical notes from doctor-patient conversations. It uses the same cross-examination framework as Medical Summarization across two datasets:
- [ACI-Bench](https://www.nature.com/articles/s41597-023-02487-3): A comprehensive collection designed specifically for benchmarking clinical note generation from doctor-patient dialogues. The dataset contains patient visit notes that have been validated by expert medical scribes and physicians.
- [SOAP Notes](https://arxiv.org/abs/2310.15959): Uses the test split of the ChartNote dataset, which contains 250 synthetic patient-doctor conversations generated from real clinical notes. The task involves generating notes in the SOAP format with the following sections:
- Subjective: Patient's description of symptoms, medical history, and personal experiences
- Objective: Observable data like physical exam findings, vital signs, and diagnostic test results
- Assessment: Healthcare provider's diagnosis based on subjective and objective information
- Plan: Treatment plan including medications, therapies, follow-ups, and referrals
"""
EVALUATION_QUEUE_TEXT = """
Currently, the benchmark supports evaluation of decoder-type models hosted on the Hugging Face Hub. Adapter models are not supported yet, but support for them will be added soon.
## Submission Guide for the MEDIC Benchmark
## First Steps Before Submitting a Model
### 1. Ensure Your Model Loads with AutoClasses
Verify that you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

revision = "main"  # or the specific branch/commit you want evaluated
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
Note:
- If this step fails, debug your model before submitting.
- Ensure your model is public.
### 2. Convert Weights to Safetensors
[Safetensors](https://huggingface.co/docs/safetensors/index) is a format for storing weights that is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
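One way to produce safetensors weights (a sketch; the model name and output path are placeholders):
```python
from transformers import AutoModel, AutoTokenizer

# Re-save an existing checkpoint with safetensors serialization, then upload
# the folder to the Hub (e.g. with huggingface_hub.upload_folder).
model = AutoModel.from_pretrained("your model name")
tokenizer = AutoTokenizer.from_pretrained("your model name")
model.save_pretrained("your-model-safetensors", safe_serialization=True)
tokenizer.save_pretrained("your-model-safetensors")
```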
### 3. Complete Your Model Card
When we add extra information about models to the leaderboard, it is automatically taken from the model card.
### 4. Select the correct model type
Choose the correct model category from the options below:
- 🟢 : 🟢 pretrained model: new base models trained on a given text corpus using causal language modelling, or base models continuously pretrained on further corpora (which may include IFT/chat data).
- ⭕ : ⭕ fine-tuned models: pretrained models fine-tuned on more data or tasks.
- 🟦 : 🟦 preference-tuned models: chat-style fine-tunes, using IFT (datasets of task instructions), RLHF, or DPO (slightly changing the model loss with an added policy), etc.
### 5. Select Correct Precision
Choose the right precision to avoid evaluation errors:
- Not all models convert properly from float16 to bfloat16.
- Incorrect precision can cause issues (e.g., loading a bf16 model in fp16 may generate NaNs).
- If you select auto, the precision specified by `torch_dtype` in the model config will be used (see the snippet below).
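For example, to check how your model behaves in a specific precision (the model name is a placeholder):
```python
import torch
from transformers import AutoModelForCausalLM

# Load explicitly in bfloat16 instead of relying on auto, which falls back
# to the torch_dtype recorded in the model config.
model = AutoModelForCausalLM.from_pretrained("your model name", torch_dtype=torch.bfloat16)
```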
### 6. Medically oriented model
If the model has been specifically built for medical domains, i.e., pretrained/fine-tuned on significant amounts of medical data, make sure to check the `Domain specific` checkbox.
### 7. Chat template
Select this option if your model uses a chat template. The chat template will be used during evaluation.
- Before submitting, make sure the chat template is defined in the tokenizer config (a quick check is shown below).
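A quick way to verify this locally (the model name is a placeholder):
```python
from transformers import AutoTokenizer

# Check that a chat template is defined and usable before submitting.
tokenizer = AutoTokenizer.from_pretrained("your model name")
assert tokenizer.chat_template is not None, "No chat template found in the tokenizer config"
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
```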
Upon successful submission of your request, your model's results will appear on the leaderboard within 5 working days!
"""
NOTE_GENERATION_METRICS = """
- **Coverage**: Measures how thoroughly the summary covers the original document. A higher score means the summary includes more details from the original.
- **Conformity**: Also called the non-contradiction score, this checks if the summary avoids contradicting the original document. A higher score means the summary aligns better with the original.
- **Consistency**: Measures the level of non-hallucination, or how much the summary sticks to the facts in the document. A higher score means the summary is more factual and accurate.
- **Overall Score**: The average of the above three scores.
"""
CROSS_EVALUATION_METRICS = """
- **Coverage**: Measures how thoroughly the summary covers the original document. A higher score means the summary includes more details from the original.
- **Conformity**: Also called the non-contradiction score, this checks if the summary avoids contradicting the original document. A higher score means the summary aligns better with the original.
- **Consistency**: Measures the level of non-hallucination, or how much the summary sticks to the facts in the document. A higher score means the summary is more factual and accurate.
- **Conciseness**: Measures how brief the summary is. A higher score means the summary is more concise. A negative score means the summary is longer than the original document.
- **Overall Score**: The average of coverage, conformity, consistency, and the harmonic mean of coverage and conciseness (if both are positive, otherwise 0).
"""
HEALTHBENCH_METRICS = """
HealthBench consists of 5,000 multi-turn conversations between users (patients or clinicians) and AI models, covering a wide range of medical topics and scenarios. Each conversation is accompanied by a set of physician-created rubric criteria, totaling 48,562 unique criteria across the benchmark, used to grade model responses on accuracy, relevance, and safety.
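As a simplified illustration of rubric-based grading (the aggregation rule below is an assumption for illustration, not necessarily HealthBench's exact formula):
```python
# Hypothetical rubric-grading sketch: each criterion carries points (possibly
# negative); a judge decides which criteria a response meets, and the score is
# the earned points over the maximum achievable positive points, floored at 0.
def rubric_score(criteria, met):
    # criteria: list of (criterion_text, points); met: indices judged as satisfied
    earned = sum(points for i, (_, points) in enumerate(criteria) if i in met)
    max_positive = sum(points for _, points in criteria if points > 0)
    return max(0.0, earned / max_positive) if max_positive else 0.0

criteria = [("Advises seeking emergency care", 5), ("Includes inaccurate dosing advice", -4)]
print(rubric_score(criteria, met={0}))  # 1.0
```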
For more information, refer to the [HealthBench paper](https://cdn.openai.com/pdf/bd7a39d5-9e9f-47b3-903c-8b847ca650c7/healthbench_paper.pdf) and the [OpenAI blog post](https://openai.com/index/healthbench/).
**Judge Used**: [meta-llama/Llama-3.1-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct)
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@misc{kanithi2024mediccomprehensiveframeworkevaluating,
title={MEDIC: Towards a Comprehensive Framework for Evaluating LLMs in Clinical Applications},
author={Praveen K Kanithi and ClΓ©ment Christophe and Marco AF Pimentel and Tathagata Raha and Nada Saadi and Hamza Javed and Svetlana Maslenkova and Nasir Hayat and Ronnie Rajan and Shadab Khan},
year={2024},
eprint={2409.07314},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2409.07314},
}
"""