import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from peft import PeftModel
import torch
# Load model and tokenizer with adapter
@st.cache_resource  # cache across Streamlit reruns so the model is loaded only once
def load_model():
    base_model = "Qwen/Qwen3-0.6B"
    adapter_path = "faizabenatmane/Qwen-3-0.6B"
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    base = AutoModelForSequenceClassification.from_pretrained(
        base_model,
        num_labels=2,
        device_map="cpu"
    )
    # Attach the fine-tuned adapter and merge it into the base weights
    model = PeftModel.from_pretrained(base, adapter_path)
    model = model.merge_and_unload()
    # ✅ Text classification pipeline
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
    return pipe

# Load pipeline once
classifier = load_model()
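# Note: classifier(...) returns a list with one dict per input, e.g.
# [{'label': 'LABEL_1', 'score': 0.98}]. With num_labels=2 and no custom
# id2label mapping, transformers names the classes LABEL_0 and LABEL_1,
# which is why the verdict logic below checks for "1" in the label.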
# Streamlit UI
st.title("📰 Fake News Detection")
text = st.text_area("Enter a news statement or claim:", height=200)

if st.button("Check"):
    with st.spinner("Analyzing..."):
        # Get classification result for the entered text
        result = classifier(text)[0]
        label = result['label']
        score = result['score']

        # Map the raw label to a human-readable verdict
        if "1" in label or "POSITIVE" in label.upper():
            verdict = "Real"
            emoji = "✅"
        else:
            verdict = "Fake"
            emoji = "❌"

        # Show result
        st.subheader("Prediction")
        st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")
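# To try the app locally (a minimal sketch; assumes this file is saved as app.py and
# that streamlit, transformers, peft, and torch are installed):
#   streamlit run app.py
# The first run downloads Qwen/Qwen3-0.6B and the faizabenatmane/Qwen-3-0.6B adapter
# from the Hugging Face Hub, so loading can take a while on CPU.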