# app.py

import gradio as gr
import torch
import os
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# ✅ Load token from secret environment variable
HF_TOKEN = os.getenv("HF_TOKEN")
model_id = "Ozziejoe/eemm-deberta-v3-small"

label_names = [
    "Cognition", "Affect", "Self", "Motivation", "Attention", "OB", "Context",
    "Social", "Physical", "Psych"
]

# "token" supersedes the deprecated "use_auth_token" argument in recent transformers releases.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForSequenceClassification.from_pretrained(model_id, token=HF_TOKEN)
model.eval()

def classify(text):
    """Multi-label classification: apply a sigmoid to each logit independently
    and keep every domain whose probability exceeds 0.5."""
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        outputs = model(**inputs)
        probs = torch.sigmoid(outputs.logits)[0]
        labels = [label_names[i] for i, p in enumerate(probs) if p > 0.5]
        return ", ".join(labels) if labels else "No domain confidently predicted."

demo = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(label="Enter a question"),
    outputs=gr.Textbox(label="Predicted domains"),
    title="EEMM Multi-Label Classifier",
    description="Classifies a question into multiple psychological domains.",
    allow_flagging="never"
)

if __name__ == "__main__":
    demo.launch(share=True, server_name="0.0.0.0")
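
# --- Usage sketch (illustrative, not part of the app) -----------------------
# Once the Space is running, it can also be queried programmatically with
# gradio_client. The Space identifier below is a placeholder assumption;
# replace it with the actual "user/space-name" of the deployed demo.
#
#   from gradio_client import Client
#
#   client = Client("Ozziejoe/eemm-classifier")   # hypothetical Space name
#   result = client.predict(
#       "How do I stay focused while studying?",  # example question
#       api_name="/predict",                      # default endpoint for gr.Interface
#   )
#   print(result)  # comma-separated list of predicted domains, if any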