mohamedbmt committed
Commit e14063c · 1 Parent(s): 6d57a66

Fix classification pipeline

Files changed (1)
  1. app.py +22 -8
app.py CHANGED
@@ -3,6 +3,7 @@ from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipe
 from peft import PeftModel
 import torch
 
+# Load model and tokenizer with adapter
 @st.cache_resource
 def load_model():
     base_model = "Qwen/Qwen3-0.6B"
@@ -19,19 +20,32 @@ def load_model():
     model = PeftModel.from_pretrained(base, adapter_path)
     model = model.merge_and_unload()
 
-    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+    # Text classification pipeline
+    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
     return pipe
 
-generator = load_model()
+# Load pipeline once
+classifier = load_model()
 
+# Streamlit UI
 st.title("📰 Fake News Detection")
-text = st.text_area("Enter a statement (news or claim):", height=200)
+text = st.text_area("Enter a news statement or claim:", height=200)
 
 if st.button("Check"):
     with st.spinner("Analyzing..."):
-        prompt = f"Determine if the following statement is fake or real:\n\n{text}\n\nAnswer:"
-        output = generator(prompt, max_length=50, do_sample=False)[0]['generated_text']
-        answer = output.split("Answer:")[-1].strip().split()[0]
-
+        # Get classification result
+        result = classifier(text)[0]
+        label = result['label']
+        score = result['score']
+
+        # Optional: format label nicely
+        if "1" in label or "POSITIVE" in label.upper():
+            verdict = "Real"
+            emoji = "✅"
+        else:
+            verdict = "Fake"
+            emoji = "❌"
+
+        # Show result
         st.subheader("Prediction")
-        st.success(f"🧠 The statement is likely: **{answer}**")
+        st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")
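The practical effect of the change is that the pipeline now returns a label/score pair instead of free-form generated text, which is what the new verdict-mapping block consumes. The sketch below is illustrative only: it mocks the pipeline output rather than loading the real base model and adapter (the adapter path is not shown in this diff), and it assumes the fine-tuned head reports Transformers' default label names such as LABEL_0/LABEL_1 unless a custom id2label was saved with the model.

# Illustrative sketch only: reproduces the verdict mapping from the new app.py
# against a mocked pipeline result, since the real adapter path is not part of
# this diff. Label names are an assumption (default LABEL_0 / LABEL_1).

def map_label(result):
    """Mirror app.py's heuristic: a label containing '1' or 'POSITIVE' means Real."""
    label, score = result["label"], result["score"]
    verdict = "Real" if ("1" in label or "POSITIVE" in label.upper()) else "Fake"
    return verdict, score

# pipeline("text-classification", ...) returns a list with one dict per input,
# e.g. [{'label': 'LABEL_1', 'score': 0.93}]
print(map_label({"label": "LABEL_1", "score": 0.93}))  # ('Real', 0.93)
print(map_label({"label": "LABEL_0", "score": 0.71}))  # ('Fake', 0.71)

If the saved adapter config carries a meaningful id2label mapping (for example {0: "fake", 1: "real"}), comparing against those names directly would be more robust than the substring check used here.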