# Hugging Face Space file — author: Ozziejoe, commit message: "Update app.py",
# commit 0941659 (verified). (Web-page residue converted to comments so the file parses.)
# app.py
import gradio as gr
import torch
import os
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Load the Hugging Face access token from the environment (set as a Space secret).
# May be None for public models; from_pretrained accepts token=None.
HF_TOKEN = os.getenv("HF_TOKEN")

model_id = "Ozziejoe/eemm-deberta-v3-small"

# Output label names, in the same order as the model's logit dimensions.
label_names = [
    "Cognition", "Affect", "Self", "Motivation", "Attention", "OB", "Context",
    "Social", "Physical", "Psych"
]

# NOTE: `use_auth_token` is deprecated in transformers (removed in v5);
# `token` is the supported keyword for authenticated downloads.
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForSequenceClassification.from_pretrained(model_id, token=HF_TOKEN)
model.eval()  # inference mode: disables dropout etc.
def classify(text, threshold=0.5):
    """Classify *text* into zero or more EEMM psychological domains.

    Args:
        text: The question/string to classify.
        threshold: Sigmoid probability cutoff for predicting a label.
            Defaults to 0.5, matching the original behavior.

    Returns:
        Comma-separated predicted label names, or a fallback message
        when no label clears the threshold.
    """
    with torch.no_grad():  # inference only — no autograd bookkeeping
        inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
        outputs = model(**inputs)
    # Multi-label setup: independent sigmoid per logit, NOT a softmax.
    probs = torch.sigmoid(outputs.logits)[0].tolist()
    labels = [name for name, p in zip(label_names, probs) if p > threshold]
    return ", ".join(labels) if labels else "No domain confidently predicted."
# Gradio UI: one free-text input routed through `classify` to a text output.
question_box = gr.Textbox(label="Enter a question")
result_box = gr.Textbox(label="Predicted domains")

demo = gr.Interface(
    fn=classify,
    inputs=question_box,
    outputs=result_box,
    title="EEMM Multi-Label Classifier",
    description="Classifies a question into multiple psychological domains.",
    allow_flagging="never",  # hide the flagging button entirely
)
# Script entry point: serve the app on all interfaces (required for
# containerized hosting) and request a public share link.
if __name__ == "__main__":
    launch_options = {"share": True, "server_name": "0.0.0.0"}
    demo.launch(**launch_options)