Update app.py
app.py
CHANGED
@@ -1,88 +1,67 @@
Old app.py (lines prefixed with - were removed; unmarked lines are unchanged context):

- from transformers import pipeline
  from fastapi import FastAPI, Request
- import

  from uagents import Agent, Context, Bureau, Model

- #

  class TextInput(Model):
      text: str

- #
-
- emotion_model = pipeline(
-     "text-classification",
-     model="bhadresh-savani/distilbert-base-uncased-emotion"
- )
-
- # ─── CUSTOM ANALYSIS LOGIC ────────────────────────────────────────────────
-
  def analyze_text_metrics(text):
-     … (additional removed lines not rendered in this view)
-     metrics["psychosis"] += score * 0.5
-
-     # keyword overrides
-     if any(kw in t for kw in suicide_keywords):
-         metrics["self_harm"] = max(metrics["self_harm"], 0.8)
-     if any(kw in t for kw in psychosis_keywords):
-         metrics["psychosis"] = max(metrics["psychosis"], 0.8)
-
-     # clamp into [0.01, 0.99]
-     for k in metrics:
-         val = metrics[k]
-         clamped = max(min(val, 0.99), 0.01)
-         metrics[k] = round(clamped, 2)
-
-     return metrics
-
- # ─── uAgent DEFINITION ────────────────────────────────────────────────────
-
  agent = Agent(name="sentiment_agent")

  @agent.on_message(model=TextInput)
  async def handle_message(ctx: Context, sender: str, msg: TextInput):
-     … (additional removed lines not rendered in this view)
-     await ctx.send(sender,
-
- # ─── FASTAPI HTTP ENDPOINT ────────────────────────────────────────────────

  app = FastAPI()

  @app.post("/")
- async def
      data = await request.json()
      return analyze_text_metrics(data.get("text", ""))

- # ─── START BOTH ───────────────────────────────────────────────────────────
-
  if __name__ == "__main__":
      bureau = Bureau()
      bureau.add(agent)
-     bureau.run_in_thread()
-
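For reference, the removed version scored text with the Hugging Face transformers emotion classifier instantiated above. A minimal sketch of how that pipeline is typically called and what it returns; the sample text and the printed mapping are illustrative only and are not the deleted scoring logic:

from transformers import pipeline

# Emotion classifier the old code instantiated; top_k=None asks for a score
# per label (this model predicts sadness, joy, love, anger, fear, surprise).
emotion_model = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    top_k=None,
)

# Passing a list returns one result list per input text.
scores = emotion_model(["I feel completely hopeless"])[0]
# e.g. [{"label": "sadness", "score": 0.93}, {"label": "fear", "score": 0.04}, ...]
print({entry["label"]: round(entry["score"], 2) for entry in scores})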
New app.py (lines prefixed with + were added; unmarked lines are unchanged context):

  from fastapi import FastAPI, Request
+ import google.generativeai as genai
+ import os
+ import json

  from uagents import Agent, Context, Bureau, Model

+ # Load Gemini API Key
+ genai.configure(api_key=os.environ["GEMINI_API_KEY"])

+ # Use Gemini Pro
+ model = genai.GenerativeModel("gemini-pro")
+
+ # uAgent input model
  class TextInput(Model):
      text: str

+ # Prompting logic
  def analyze_text_metrics(text):
+     prompt = f"""
+     You are a mental health AI assistant. Return a JSON with:
+     - self_harm
+     - homicidal
+     - distress
+     - psychosis
+
+     Scores between 0.01–0.99.
+
+     Text: "{text}"
+     Respond in JSON format only.
+     """
+     try:
+         response = model.generate_content(prompt)
+         raw = response.text.strip().strip("```json").strip("```")
+         return json.loads(raw)
+     except Exception as e:
+         return {
+             "self_harm": 0.01,
+             "homicidal": 0.01,
+             "distress": 0.01,
+             "psychosis": 0.01,
+             "error": str(e)
+         }
+
+ # uAgent definition
  agent = Agent(name="sentiment_agent")

  @agent.on_message(model=TextInput)
  async def handle_message(ctx: Context, sender: str, msg: TextInput):
+     result = analyze_text_metrics(msg.text)
+     await ctx.send(sender, result)

+ # FastAPI app for Space
  app = FastAPI()

  @app.post("/")
+ async def analyze(request: Request):
      data = await request.json()
      return analyze_text_metrics(data.get("text", ""))

  if __name__ == "__main__":
      bureau = Bureau()
      bureau.add(agent)
+     bureau.run_in_thread()
+
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)  # Hugging Face expects 7860
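One detail in the new analyze_text_metrics worth flagging: str.strip("```json") strips any of the characters `, j, s, o and n from both ends rather than the literal substring, so the parse works when Gemini returns exactly a fenced JSON block but breaks if the reply contains surrounding prose. A more defensive alternative, sketched here only as a suggestion (parse_json_reply is a hypothetical helper, not part of this commit), extracts the first {...} object with a regex:

import json
import re

# Hypothetical helper, not part of app.py: pull the first {...} block out of
# a model reply and parse it, regardless of code fences or extra prose.
def parse_json_reply(raw_text: str) -> dict:
    match = re.search(r"\{.*\}", raw_text, flags=re.DOTALL)
    if match is None:
        raise ValueError("no JSON object found in model reply")
    return json.loads(match.group(0))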
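Once the Space is up, the FastAPI route accepts a JSON body with a "text" field and returns the metric dictionary. A quick local smoke test, assuming the uvicorn server started in __main__ on port 7860 (a deployed Space would use its own *.hf.space URL instead):

import requests

# POST a text sample to the root endpoint and print the returned metrics.
resp = requests.post(
    "http://localhost:7860/",
    json={"text": "I have been feeling very low and hopeless lately."},
)
print(resp.json())
# Expected keys: self_harm, homicidal, distress, psychosis (plus "error" on failure)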