Update hate_speech_demo.py
hate_speech_demo.py  CHANGED  (+5 -5)
```diff
@@ -810,7 +810,7 @@ def rate_user_input(user_input):
     # Create the popup div (initially hidden)
     knowledge_html = f"""
     <div id="{popup_id}" class="knowledge-popup" style="display: none;">
-        <div class="knowledge-popup-header">Supporting evidence for Contextual Safety Oracle</div>
+        <div class="knowledge-popup-header">Supporting evidence for Contextual Policy Engine</div>
         <button class="knowledge-popup-close"
                 onclick="this.parentElement.style.display='none';
                          document.getElementById('btn-{popup_id}').style.display='inline-block';
```
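This hunk only renames the popup header. For context, the popup itself is plain HTML built in a Python f-string: a div hidden with `display: none`, plus a close button that hides it again and restores its matching open button. A minimal sketch of that pattern, assuming a hypothetical `make_popup` helper and open-button markup (the file's real markup is more elaborate):

```python
# Sketch of the show/hide pattern used by the popup above. make_popup and
# its parameters are hypothetical; only the id scheme follows the diff.
def make_popup(popup_id: str, evidence_html: str) -> str:
    return f"""
    <button id="btn-{popup_id}" class="knowledge-button"
            onclick="document.getElementById('{popup_id}').style.display='block';
                     this.style.display='none';">
        Show evidence
    </button>
    <div id="{popup_id}" class="knowledge-popup" style="display: none;">
        <div class="knowledge-popup-header">Supporting evidence for Contextual Policy Engine</div>
        <button class="knowledge-popup-close"
                onclick="this.parentElement.style.display='none';
                         document.getElementById('btn-{popup_id}').style.display='inline-block';">
            Close
        </button>
        {evidence_html}
    </div>
    """
```

Pairing the ids (`{popup_id}` / `btn-{popup_id}`) lets each evidence popup toggle independently when several appear on one page.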
```diff
@@ -873,21 +873,21 @@ def create_gradio_app():
     # Use the custom CSS with PDF modal styling
     custom_css = CUSTOM_CSS
 
-    with gr.Blocks(title="Hate Speech Rating Contextual Safety Oracle", theme=theme, css=custom_css) as app:
+    with gr.Blocks(title="Hate Speech Rating Contextual Policy Engine", theme=theme, css=custom_css) as app:
         # Add loading spinner
         loading_spinner = gr.HTML('<div id="loading-spinner"></div>')
 
         # Add the PDF modal HTML directly (defined at the top of the file)
         gr.HTML(PDF_MODAL_HTML)
 
-        gr.Markdown("# Contextual Safety Oracle for Rating Hate Speech [BETA]")
+        gr.Markdown("# Contextual Policy Engine for Rating Hate Speech [BETA]")
         gr.HTML("""
         <div style="margin-bottom: 20px;">
             <p>
                 <strong>Assess whether user-generated social content contains hate speech using Contextual AI's State-of-the-Art Agentic RAG system.</strong>
             </p>
             <p>
-                Contextual's
+                The Contextual Policy Engine's classifications are steerable and explainable as they are based on a policy document rather than parametric knowledge. This app returns ratings from LlamaGuard 3.0, the OpenAI Moderation API and the Perspective API from Google Jigsaw for comparison. Feedback is welcome as we work with design partners to bring this to production. Reach out to Aravind Mohan, Head of Data Science, at <a href="mailto:aravind.mohan@contextual.ai">aravind.mohan@contextual.ai</a>.
             </p>
 
             <h2>Instructions</h2>
```
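For readers unfamiliar with the scaffold being edited here: components declared inside the `with gr.Blocks(...)` context are rendered in declaration order, and the constructor takes the page title, a theme, and custom CSS. A minimal runnable sketch of the same shape, with stand-in `theme` and CSS values rather than the file's real `CUSTOM_CSS`:

```python
# Minimal runnable sketch of the create_gradio_app() scaffold edited above.
# theme and CUSTOM_CSS are stand-ins, not the file's real definitions.
import gradio as gr

CUSTOM_CSS = ".knowledge-popup { border: 1px solid #ddd; padding: 12px; }"
theme = gr.themes.Default()

def create_gradio_app():
    with gr.Blocks(title="Hate Speech Rating Contextual Policy Engine",
                   theme=theme, css=CUSTOM_CSS) as app:
        gr.HTML('<div id="loading-spinner"></div>')  # loading spinner placeholder
        gr.Markdown("# Contextual Policy Engine for Rating Hate Speech [BETA]")
        gr.HTML("<p>Assess whether user-generated content contains hate speech.</p>")
    return app

if __name__ == "__main__":
    create_gradio_app().launch()
```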
```diff
@@ -935,7 +935,7 @@ def create_gradio_app():
             # π Contextual Safety Oracle
             gr.HTML("""
             <div class="result-header" style="display: flex; align-items: center; gap: 10px;">
-                <span>π Contextual Safety Oracle</span>
+                <span>π Contextual Policy Engine</span>
                 <a href="#" class="knowledge-button" onclick="openPolicyPopup(); return false;">View policy</a>
             </div>
             """)
```
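The new description promises side-by-side ratings from LlamaGuard 3.0, the OpenAI Moderation API, and Google Jigsaw's Perspective API. That wiring sits outside this diff; as an illustration only, the two hosted APIs can be queried roughly as below (API-key handling and the `IDENTITY_ATTACK` attribute choice are assumptions, and LlamaGuard is omitted since it requires a hosted model):

```python
# Illustrative sketch of comparison-rating calls; not the file's actual code.
# Assumes OPENAI_API_KEY and PERSPECTIVE_API_KEY are set in the environment.
import os
import requests
from openai import OpenAI

def openai_moderation_flagged(text: str) -> bool:
    """Return True if OpenAI's moderation endpoint flags the text."""
    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    result = client.moderations.create(model="omni-moderation-latest", input=text)
    return result.results[0].flagged

def perspective_identity_attack(text: str) -> float:
    """Return Perspective's IDENTITY_ATTACK summary score in [0, 1]."""
    url = ("https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze"
           f"?key={os.environ['PERSPECTIVE_API_KEY']}")
    body = {
        "comment": {"text": text},
        "requestedAttributes": {"IDENTITY_ATTACK": {}},
    }
    response = requests.post(url, json=body, timeout=30)
    response.raise_for_status()
    scores = response.json()["attributeScores"]
    return scores["IDENTITY_ATTACK"]["summaryScore"]["value"]
```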