import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from peft import PeftModel
import torch
# Load model and tokenizer with adapter
@st.cache_resource
def load_model():
    base_model = "Qwen/Qwen3-0.6B"
    adapter_path = "faizabenatmane/Qwen-3-0.6B"

    tokenizer = AutoTokenizer.from_pretrained(base_model)
    base = AutoModelForSequenceClassification.from_pretrained(
        base_model,
        num_labels=2,
        device_map="cpu"
    )

    # Apply the LoRA adapter, then merge its weights into the base model
    # so inference runs on a single plain model
    model = PeftModel.from_pretrained(base, adapter_path)
    model = model.merge_and_unload()

    # Build a text-classification pipeline around the merged model
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
    return pipe
# Load pipeline once
classifier = load_model()
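
# Assumed label convention: with num_labels=2 and no explicit id2label mapping,
# the pipeline typically returns generic labels such as "LABEL_0" / "LABEL_1";
# the check below treats label 1 (or "POSITIVE") as "Real". Adjust that mapping
# if the fine-tuned adapter was trained with a different label order.
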
# Streamlit UI
st.title("📰 Fake News Detection")
text = st.text_area("Enter a news statement or claim:", height=200)
if st.button("Check"):
    with st.spinner("Analyzing..."):
        # Get classification result
        result = classifier(text)[0]
        label = result['label']
        score = result['score']

    # Map the raw pipeline label (e.g. "LABEL_1") to a human-readable verdict
    if "1" in label or "POSITIVE" in label.upper():
        verdict = "Real"
        emoji = "✅"
    else:
        verdict = "Fake"
        emoji = "❌"

    # Show result
    st.subheader("Prediction")
    st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")
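
# To try the app locally, the usual entry point would be:
#   streamlit run app.py
# ("app.py" is the assumed file name for this Space's script.)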