APP / app.py
mohamedbmt's picture
oled version
e20d2a1
raw
history blame
1.48 kB
import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from peft import PeftModel
import torch
# Build the classification pipeline once per session; st.cache_resource
# memoizes the result across Streamlit reruns.
@st.cache_resource
def load_model():
    """Load the base Qwen model, apply the fine-tuned adapter, and return
    a ready-to-use text-classification pipeline."""
    base_model_id = "Qwen/Qwen3-0.6B"
    adapter_id = "faizabenatmane/Qwen-3-0.6B"

    tokenizer = AutoTokenizer.from_pretrained(base_model_id)
    backbone = AutoModelForSequenceClassification.from_pretrained(
        base_model_id,
        num_labels=2,
        device_map="cpu",
    )

    # Attach the adapter weights, then fold them into the base weights so
    # inference runs on a single plain model (no PEFT wrapper at runtime).
    merged = PeftModel.from_pretrained(backbone, adapter_id).merge_and_unload()

    return pipeline("text-classification", model=merged, tokenizer=tokenizer)
# Load pipeline once
classifier = load_model()
# Streamlit UI
st.title("πŸ“° Fake News Detection")
text = st.text_area("Enter a news statement or claim:", height=200)
if st.button("Check"):
    # Guard: without this, an empty text area would still be sent to the
    # classifier and yield a meaningless "prediction".
    if not text.strip():
        st.warning("Please enter some text to analyze.")
    else:
        with st.spinner("Analyzing..."):
            # Pipeline returns a list of {label, score} dicts; take the
            # top (and only) entry for single-text input.
            result = classifier(text)[0]
            label = result['label']
            score = result['score']
            # Map the raw model label to a human-readable verdict.
            # NOTE(review): assumes class 1 (e.g. "LABEL_1") means "real" —
            # confirm against the adapter's training label mapping.
            if "1" in label or "POSITIVE" in label.upper():
                verdict = "Real"
                emoji = "✅"
            else:
                verdict = "Fake"
                emoji = "❌"
            # Show result
            st.subheader("Prediction")
            st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")