# Author: Piyush Singh
# Commit: Add application file (19116b5)
import torch
import gradio as gr
from transformers import DebertaV2Tokenizer
from modeling_multitask_bias import MultiTaskBiasModel
# Hugging Face Hub repo that hosts the fine-tuned checkpoint.
REPO = "piyush333/deberta-v3-multitask-bias-detector-mach-1"
# Checkpoint filename inside the repo — presumably DPO-trained epoch-5 weights
# (inferred from the filename; confirm against the training run).
CKPT = "model_dpo_epoch_5.pt"
# Run on GPU when one is available, otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Per-task mapping from class index (0-2) to human-readable stance label.
# Keys must match the task strings accepted by the model / UI dropdown.
label_maps = {
"political": {0: "left", 1: "neutral", 2: "right"},
"gender": {0: "misogynist", 1: "neutral", 2: "misandrist"},
"immigration": {0: "anti", 1: "neutral", 2: "pro"}
}
def load_model():
    """Build the tokenizer/model pair used for inference.

    Downloads the fine-tuned checkpoint from the Hugging Face Hub
    (cached by torch.hub on subsequent runs), loads it into a fresh
    MultiTaskBiasModel, and places the model on DEVICE in eval mode.

    Returns:
        tuple: (tokenizer, model) ready for inference.
    """
    checkpoint_url = f"https://huggingface.co/{REPO}/resolve/main/{CKPT}"
    state_dict = torch.hub.load_state_dict_from_url(
        checkpoint_url,
        map_location="cpu",
    )

    bias_model = MultiTaskBiasModel()
    bias_model.load_state_dict(state_dict)
    bias_model.to(DEVICE)
    bias_model.eval()

    tok = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v3-base")
    return tok, bias_model
# Load once at import time so every Gradio request reuses the same objects.
tokenizer, model = load_model()
def predict(text, task):
    """Classify `text` for stance bias on the given task.

    Args:
        text: Input text to classify.
        task: One of "political", "gender", "immigration"
              (must be a key of `label_maps`).

    Returns:
        tuple: (predicted label string,
                dict mapping each class label to its probability,
                rounded to 4 decimal places).
    """
    enc = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(DEVICE)
    with torch.no_grad():
        # Model signature takes ids, mask, and a list of task names;
        # assumes logits come back as (batch, 3) — TODO confirm.
        logits = model(enc["input_ids"], enc["attention_mask"], [task])
    probs = torch.softmax(logits, dim=1)[0]
    classes = [label_maps[task][i] for i in range(3)]
    # Argmax on the probability tensor directly — the original rebuilt a
    # tensor from a Python list (torch.tensor(probs).argmax()) for no gain.
    pred = classes[int(probs.argmax())]
    return pred, {cls: round(float(p), 4) for cls, p in zip(classes, probs.tolist())}
# Gradio UI: free-text input plus a task selector restricted to the three
# tasks the model (and label_maps) supports; outputs the predicted label
# and the full probability distribution.
demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Text", lines=4, placeholder="Enter text..."),
        gr.Dropdown(["political", "gender", "immigration"], label="Task")
    ],
    outputs=[gr.Textbox(label="Prediction"), gr.JSON(label="Probabilities")],
    title="DeBERTaV3 Multi-Task Bias Detector (Mach-1)",
    description="Detects stance bias in political, gender, and immigration domains"
)
# Start the Gradio server only when run as a script (not on import,
# e.g. when hosted by Hugging Face Spaces' own runner).
if __name__ == "__main__":
    demo.launch()