Machlovi commited on
Commit
d03b0e0
·
verified ·
1 Parent(s): 5feef48

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +81 -63
app.py CHANGED
@@ -8,78 +8,95 @@ ENDPOINT_URL = os.environ.get("ENDPOINT_URL", "https://dz0eq6vxq3nm0uh7.us-east-
8
  HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "").strip() # Use strip() to remove extra whitespaces and newlines
9
 
10
 
11
- # Check if the API token is configured
12
- def is_token_configured():
13
- if not HF_API_TOKEN:
14
- return "⚠️ Warning: HF_API_TOKEN is not configured. The app won't work until you add this secret in your Space settings."
15
- return "✅ API token is configured"
16
  import requests
17
-
18
  import json
19
- import requests
20
 
21
- def check_safety(input_text):
22
- if not input_text.strip():
23
- return "⚠️ Please enter some text to check."
24
-
25
- payload = {
26
- "inputs": input_text
27
- }
28
-
29
- headers = {
30
- "Content-Type": "application/json",
31
- "Authorization": f"Bearer {HF_API_TOKEN}"
32
- }
33
-
34
- try:
35
- response = requests.post(ENDPOINT_URL, json=payload, headers=headers, timeout=30)
36
-
37
- if response.headers.get("content-type", "").startswith("application/json"):
38
- result = response.json() # result is a string containing triple backticks
39
-
40
- if isinstance(result, str):
41
- # Remove triple backticks if present
42
- cleaned = result.strip()
43
- if cleaned.startswith("```"):
44
- cleaned = cleaned.strip("```").strip()
45
- if cleaned.startswith("json"):
46
- cleaned = cleaned[4:].strip() # remove 'json' label if there
47
-
48
- # Now parse cleaned string
49
- result = json.loads(cleaned)
50
-
51
- # Now safely access fields
52
- is_safe = result.get("Safety", "").lower() == "safe"
53
- score = result.get("Score", "")
54
- categories = result.get("Unsafe Categories", "")
55
-
56
- if is_safe:
57
- return f"✅ Safe\n\nSafety: safe\nScore: {score}\nUnsafe Categories: {categories}"
58
- else:
59
- return f"❌ Unsafe\n\nSafety: unsafe\nScore: {score}\nUnsafe Categories: {categories}"
60
 
61
- else:
62
- return f"❗ Error: Server returned non-JSON response:\n\n{response.text}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
- except requests.exceptions.Timeout:
65
- return "❗ Error: Request timed out."
66
-
67
- except requests.exceptions.ConnectionError:
68
- return "❗ Error: Failed to connect to the endpoint."
69
-
70
- except Exception as e:
71
- return f"❗ Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
  with gr.Blocks(title="Safety Content Classifier", css="footer {display: none !important}") as demo:
75
- gr.Markdown(f"# Safety Content Classifier")
76
- gr.Markdown(f"## Connected to external safety model endpoint")
77
 
78
  token_status = gr.Markdown(is_token_configured())
79
 
80
  with gr.Accordion("About this demo", open=False):
81
  gr.Markdown("""
82
- This demo uses an external API endpoint to classify text and images based on safety policies.
83
  It checks content against the following categories:
84
  - Harassment
85
  - Dangerous Content
@@ -111,12 +128,13 @@ with gr.Blocks(title="Safety Content Classifier", css="footer {display: none !im
111
 
112
  gr.Examples(
113
  [
114
- ["Hello, how are you today?", None],
115
- ["I want to learn how to make a bomb.", None],
116
- ["", "example_images/unsafe_meme.png"],
117
- ["", "example_images/normal_photo.jpg"],
118
  ],
119
  inputs=[input_text]
120
  )
121
 
122
  demo.launch()
 
 
8
  HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "").strip() # Use strip() to remove extra whitespaces and newlines
9
 
10
 
 
 
 
 
 
11
  import requests
 
12
  import json
 
13
 
14
+ # Assuming your class with extract_and_parse_json exists
15
+ # Or you can define it separately if needed
16
+
17
+ class SafetyChecker:
18
+ def __init__(self):
19
+ self.ENDPOINT_URL = os.environ.get("ENDPOINT_URL", "https://your-endpoint")
20
+ self.HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "").strip()
21
+
22
+ def extract_and_parse_json(self, response: str):
23
+ match = re.search(r'```(?:json)?\s*(.*?)\s*```', response, re.DOTALL)
24
+ content = match.group(1).strip() if match else response.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ if not content.startswith("{") and ":" in content:
27
+ content = "{" + content + "}"
28
+
29
+ try:
30
+ parsed = json.loads(content)
31
+ except json.JSONDecodeError:
32
+ cleaned = content.replace(""", "\"").replace(""", "\"").replace("'", "\"")
33
+ cleaned = re.sub(r',\s*}', '}', cleaned)
34
+ cleaned = re.sub(r',\s*]', ']', cleaned)
35
+ try:
36
+ parsed = json.loads(cleaned)
37
+ except Exception:
38
+ pairs = re.findall(r'"([^"]+)":\s*"?([^",\{\}\[\]]+)"?', content)
39
+ if pairs:
40
+ parsed = {k.strip(): v.strip() for k, v in pairs}
41
+ else:
42
+ parsed = {
43
+ "Safety": "",
44
+ "Score": "",
45
+ "Unsafe Categories": "",
46
+ }
47
+ return parsed
48
 
49
+ def check_safety(self, input_text):
50
+ if not input_text.strip():
51
+ return "⚠️ Please enter some text to check."
52
+
53
+ payload = {"inputs": input_text}
54
+ headers = {
55
+ "Content-Type": "application/json",
56
+ "Authorization": f"Bearer {self.HF_API_TOKEN}"
57
+ }
58
+
59
+ try:
60
+ response = requests.post(self.ENDPOINT_URL, json=payload, headers=headers, timeout=30)
61
+
62
+ if response.status_code == 200:
63
+ result_raw = response.json() # still a string inside triple backticks
64
+
65
+ if isinstance(result_raw, str):
66
+ parsed_result = self.extract_and_parse_json(result_raw)
67
+ else:
68
+ parsed_result = result_raw
69
 
70
+ # Now parsed_result is a dictionary
71
+ safety = parsed_result.get("Safety", "Unknown")
72
+ score = parsed_result.get("Score", "")
73
+ categories = parsed_result.get("Unsafe Categories", "")
74
+
75
+ is_safe = (safety.lower() == "safe")
76
+
77
+ if is_safe:
78
+ return f"✅ Safe\n\nSafety: {safety}\nScore: {score}\nUnsafe Categories: {categories}"
79
+ else:
80
+ return f"❌ Unsafe\n\nSafety: {safety}\nScore: {score}\nUnsafe Categories: {categories}"
81
+ else:
82
+ return f"❗ Error: Request failed with status code {response.status_code}.\nDetails: {response.text}"
83
+
84
+ except requests.exceptions.Timeout:
85
+ return "❗ Error: Request timed out."
86
+ except requests.exceptions.ConnectionError:
87
+ return "❗ Error: Failed to connect to the endpoint."
88
+ except Exception as e:
89
+ return f"❗ Error: {str(e)}"
90
 
91
  with gr.Blocks(title="Safety Content Classifier", css="footer {display: none !important}") as demo:
92
+ gr.Markdown("# Safety Content Classifier")
93
+ gr.Markdown("## Connected to external safety model endpoint")
94
 
95
  token_status = gr.Markdown(is_token_configured())
96
 
97
  with gr.Accordion("About this demo", open=False):
98
  gr.Markdown("""
99
+ This demo uses an external API endpoint to classify text based on safety policies.
100
  It checks content against the following categories:
101
  - Harassment
102
  - Dangerous Content
 
128
 
129
  gr.Examples(
130
  [
131
+ ["Hello, how are you today?"],
132
+ ["I want to learn how to make a bomb."],
133
+ ["Let's meet for coffee tomorrow."],
134
+ ["COVID vaccines are a secret plot by the government."],
135
  ],
136
  inputs=[input_text]
137
  )
138
 
139
  demo.launch()
140
+