File size: 4,313 Bytes
23d6dd1
8c910e7
 
23d6dd1
c48ebf9
dade5ab
23a71da
 
 
c48ebf9
 
 
 
 
 
66f1683
 
5feef48
 
 
66f1683
 
 
 
 
 
 
 
 
 
5feef48
66f1683
 
 
 
 
5feef48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66f1683
 
5feef48
66f1683
5feef48
66f1683
 
5feef48
 
66f1683
5feef48
66f1683
 
5feef48
66f1683
 
 
23d6dd1
8256398
8c910e7
 
 
 
c48ebf9
 
8c910e7
 
dade5ab
8c910e7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
922abfa
8c910e7
 
 
 
 
dade5ab
8c910e7
 
dade5ab
e9a074f
 
8c910e7
 
 
dade5ab
 
 
 
8c910e7
e9a074f
8c910e7
23d6dd1
c48ebf9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import gradio as gr
import requests
import os

# Configure the endpoint and authentication.
# ENDPOINT_URL: the inference endpoint that safety checks are POSTed to;
# overridable via the ENDPOINT_URL environment variable.
ENDPOINT_URL = os.environ.get(
    "ENDPOINT_URL",
    "https://dz0eq6vxq3nm0uh7.us-east-1.aws.endpoints.huggingface.cloud",
)
# HF_API_TOKEN: bearer token for the endpoint, injected as a Space secret.
# strip() guards against trailing whitespace/newlines pasted in with the secret.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "").strip()


def is_token_configured() -> str:
    """Return a human-readable status line for the HF_API_TOKEN secret.

    Used as the initial value of the status Markdown widget so users can
    see immediately whether the Space is ready to call the endpoint.
    """
    if not HF_API_TOKEN:
        return "⚠️ Warning: HF_API_TOKEN is not configured. The app won't work until you add this secret in your Space settings."
    # Fixed mojibake: the success marker was the mis-encoded "βœ…".
    return "✅ API token is configured"
import requests

import json
import requests

def check_safety(input_text):
    if not input_text.strip():
        return "⚠️ Please enter some text to check."
    
    payload = {
        "inputs": input_text
    }
    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {HF_API_TOKEN}"
    }
    
    try:
        response = requests.post(ENDPOINT_URL, json=payload, headers=headers, timeout=30)

        if response.headers.get("content-type", "").startswith("application/json"):
            result = response.json()  # result is a string containing triple backticks
            
            if isinstance(result, str):
                # Remove triple backticks if present
                cleaned = result.strip()
                if cleaned.startswith("```"):
                    cleaned = cleaned.strip("```").strip()
                    if cleaned.startswith("json"):
                        cleaned = cleaned[4:].strip()  # remove 'json' label if there
                
                # Now parse cleaned string
                result = json.loads(cleaned)
            
            # Now safely access fields
            is_safe = result.get("Safety", "").lower() == "safe"
            score = result.get("Score", "")
            categories = result.get("Unsafe Categories", "")

            if is_safe:
                return f"βœ… Safe\n\nSafety: safe\nScore: {score}\nUnsafe Categories: {categories}"
            else:
                return f"❌ Unsafe\n\nSafety: unsafe\nScore: {score}\nUnsafe Categories: {categories}"
        
        else:
            return f"❗ Error: Server returned non-JSON response:\n\n{response.text}"

    except requests.exceptions.Timeout:
        return "❗ Error: Request timed out."
    
    except requests.exceptions.ConnectionError:
        return "❗ Error: Failed to connect to the endpoint."
    
    except Exception as e:
        return f"❗ Error: {str(e)}"


# Build the Gradio UI: a text box on the left, the verdict on the right.
with gr.Blocks(title="Safety Content Classifier", css="footer {display: none !important}") as demo:
    gr.Markdown("# Safety Content Classifier")
    gr.Markdown("## Connected to external safety model endpoint")

    # Surface the token status up front so a missing secret is obvious.
    token_status = gr.Markdown(is_token_configured())

    with gr.Accordion("About this demo", open=False):
        gr.Markdown("""
        This demo uses an external API endpoint to classify text and images based on safety policies.
        It checks content against the following categories:
        - Harassment
        - Dangerous Content
        - Hate Speech
        - Sexually Explicit Information
        
        The model will respond with 'Safe' or 'Unsafe' followed by any violated categories.
        """)

    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Enter text to check",
                placeholder="Type here...",
                lines=5
            )

            check_button = gr.Button("Check Safety", variant="primary")

        with gr.Column():
            output = gr.Textbox(
                label="Safety Result",
                lines=8
            )

    # Run the safety check on button click or Enter in the textbox.
    check_button.click(fn=check_safety, inputs=[input_text], outputs=output)
    input_text.submit(fn=check_safety, inputs=[input_text], outputs=output)

    # Each example row must supply exactly one value per input component.
    # The previous rows carried a second (image-path) value with no matching
    # input component, which gr.Examples rejects at startup.
    gr.Examples(
        [
            ["Hello, how are you today?"],
            ["I want to learn how to make a bomb."],
        ],
        inputs=[input_text]
    )

demo.launch()