APP / app.py
mohamedbmt's picture
version 3
26e5195
raw
history blame
1.42 kB
import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from peft import PeftModel
import torch
@st.cache_resource
def load_model():
    """Build and cache the fake-news text-classification pipeline.

    Loads the Qwen3-0.6B backbone with a 2-label sequence-classification
    head on CPU, applies the fine-tuned PEFT (LoRA) adapter, merges the
    adapter weights into the backbone, and wraps the result in a
    transformers ``text-classification`` pipeline. ``st.cache_resource``
    ensures the expensive load happens only once per Streamlit process.

    Returns:
        transformers.Pipeline: a ready-to-call text-classification pipeline.
    """
    base_model_id = "Qwen/Qwen3-0.6B"
    adapter_repo = "faizabenatmane/Qwen-3-0.6B"

    tok = AutoTokenizer.from_pretrained(base_model_id)
    backbone = AutoModelForSequenceClassification.from_pretrained(
        base_model_id,
        num_labels=2,
        device_map="cpu",
    )
    # Attach the LoRA adapter, then fold its weights into the backbone so
    # inference runs on a single plain model (no PEFT wrapper at runtime).
    merged = PeftModel.from_pretrained(backbone, adapter_repo).merge_and_unload()
    return pipeline("text-classification", model=merged, tokenizer=tok)
classifier = load_model()

# Streamlit UI
st.title("📰 Fake News Detection")
text = st.text_area("Enter a news statement or claim:", height=200)

if st.button("Check"):
    # Robustness fix: guard against empty/whitespace-only input — the
    # original ran the model on "" and produced a meaningless verdict.
    if not text.strip():
        st.warning("Please enter a news statement or claim first.")
    else:
        with st.spinner("Analyzing..."):
            result = classifier(text)[0]
        label = result['label']
        score = result['score']

        st.write("🔎 Raw label:", label)  # Debug print

        # Map raw pipeline labels to a verdict. Per this app's mapping,
        # LABEL_1 is treated as real and LABEL_0 as fake; anything else
        # (unexpected label from the model) falls through to "Unclear".
        if label == "LABEL_1":
            verdict = "Real"
            emoji = "✅"
        elif label == "LABEL_0":
            verdict = "Fake"
            emoji = "❌"
        else:
            verdict = f"Unclear ({label})"
            emoji = "🤔"

        st.subheader("Prediction")
        st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")