"""
Text content for the CircleGuardBench Leaderboard.
"""
TITLE = """ | |
<div style="text-align: center; margin-bottom: 1rem"> | |
<h1>CircleGuardBench Leaderboard</h1> | |
</div> | |
""" | |
INTRODUCTION_TEXT = """
## Introduction

CircleGuardBench is a comprehensive benchmark for evaluating the protection capabilities of large language model (LLM) guard systems.
This leaderboard tracks model performance across safety categories, including harmful content detection and jailbreak resistance.
Models are evaluated on their ability to properly refuse harmful requests and detect problematic content
across multiple categories and test scenarios.
"""
LLM_BENCHMARKS_TEXT = """
CircleGuardBench is a first-of-its-kind benchmark for evaluating the protection capabilities of large language model (LLM) guard systems.
It tests how well guard models block harmful content, resist jailbreaks, avoid false positives, and operate efficiently in real-time environments, using a taxonomy that closely mirrors real-world data.

Learn more about us at [whitecircle.ai](https://whitecircle.ai).
"""
EVALUATION_QUEUE_TEXT = """
## Submit Your Model

To add your model to the CircleGuardBench leaderboard:

1. Run your evaluation with the [CircleGuardBench framework](https://github.com/whitecircle-ai/circle-guard-bench).
2. Upload your run results in `.jsonl` format using the form below (see the format check sketched after this list).
3. Once validated, your model will appear on the leaderboard.
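
If you want to sanity-check your results file before uploading, here is a minimal sketch that only verifies the file is valid JSON Lines (one JSON object per line). The exact record schema is defined by the CircleGuardBench framework, and `results.jsonl` is a placeholder filename:

```python
import json

# Placeholder path to your exported run results; adjust as needed.
path = "results.jsonl"

with open(path, encoding="utf-8") as f:
    for lineno, line in enumerate(f, start=1):
        if not line.strip():
            continue  # ignore blank lines
        try:
            record = json.loads(line)
        except json.JSONDecodeError as err:
            raise ValueError(f"Line {lineno} is not valid JSON: {err}")
        if not isinstance(record, dict):
            raise ValueError(f"Line {lineno} should be a JSON object, got {type(record).__name__}")

print("File parses as JSONL and is ready to upload.")
```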

### ✉️✨ Ready? Upload your results below!
"""
CITATION_BUTTON_LABEL = "Cite CircleGuardBench"
CITATION_BUTTON_TEXT = """
@misc{circleguardbench2025,
  author       = {whitecircle-ai},
  title        = {CircleGuardBench: Comprehensive Benchmark for LLM Safety Guardrails},
  year         = {2025},
  publisher    = {GitHub},
  journal      = {GitHub repository},
  howpublished = {\\url{https://github.com/whitecircle-ai/circle-guard-bench}}
}
"""