# Streamlit app: fake-news detection with a fine-tuned Qwen3-0.6B
# (deployed as a Hugging Face Space).
import streamlit as st
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load model and tokenizer with adapter.
@st.cache_resource
def load_model():
    """Build the text-generation pipeline once and cache it across reruns.

    Loads the Qwen3-0.6B base model in fp16, applies the fine-tuned PEFT
    adapter, merges the adapter weights into the base model, and wraps the
    result in a transformers text-generation pipeline.

    Returns:
        transformers.Pipeline: a ready-to-use "text-generation" pipeline.
    """
    base_model = "Qwen/Qwen3-0.6B"
    adapter_path = "faizabenatmane/Qwen-3-0.6B"
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    # device_map="auto" places weights on GPU when one is available.
    base = AutoModelForCausalLM.from_pretrained(
        base_model, torch_dtype=torch.float16, device_map="auto"
    )
    model = PeftModel.from_pretrained(base, adapter_path)
    # Fold the LoRA weights into the base model so inference runs without
    # PEFT hooks on the forward pass.
    model = model.merge_and_unload()
    # st.cache_resource (decorator above) keeps this pipeline alive across
    # Streamlit reruns instead of reloading ~0.6B params on every click.
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
generator = load_model()

# Streamlit UI
st.title("📰 Fake News Detection (Text Generation)")
text = st.text_area("Enter a news statement or claim:", height=200)

if st.button("Check"):
    if not text.strip():
        # Nothing to classify — prompt the user instead of querying the model.
        st.warning("Please enter a statement to check.")
    else:
        with st.spinner("Analyzing..."):
            prompt = f"Is the following statement real or fake?\n\n{text}\n\nAnswer:"
            # Use max_new_tokens, not max_length: max_length counts the prompt
            # tokens too, so any prompt near/over 50 tokens would leave no
            # budget for the answer. A one-word verdict needs only a few tokens.
            output = generator(prompt, max_new_tokens=10, do_sample=False)[0]["generated_text"]
            # Keep only the model's continuation after the final "Answer:".
            tokens = output.split("Answer:")[-1].strip().split()
            # Guard against an empty continuation (would raise IndexError).
            answer = tokens[0].lower() if tokens else ""
        if "real" in answer:
            emoji = "✅"
            verdict = "Real"
        elif "fake" in answer:
            emoji = "❌"
            verdict = "Fake"
        else:
            emoji = "🤔"
            verdict = f"Unclear: {answer}"
        st.subheader("Prediction")
        st.success(f"{emoji} The statement is likely: **{verdict}**")