mohamedbmt committed on
Commit 300bedb · 1 Parent(s): d8c4d96

Fix: update dependencies, deepseek

Files changed (2)
  1. app.py +15 -33
  2. requirements.txt +4 -4
app.py CHANGED
@@ -1,50 +1,32 @@
 import streamlit as st
-from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from peft import PeftModel
 import torch
 
 @st.cache_resource
 def load_model():
-    base_model = "Qwen/Qwen3-0.6B"
-    adapter_path = "faizabenatmane/Qwen-3-0.6B"
+    base_model = "deepseek-ai/deepseek-coder-5.7b-base"
+    adapter_path = "faizabenatmane/deepseek-coder-5.7bmqa-finetuned"
 
     tokenizer = AutoTokenizer.from_pretrained(base_model)
-
-    base = AutoModelForSequenceClassification.from_pretrained(
-        base_model,
-        num_labels=2,
-        device_map="cpu"
-    )
+    base = AutoModelForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16, device_map="auto")
 
     model = PeftModel.from_pretrained(base, adapter_path)
     model = model.merge_and_unload()
 
-    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
     return pipe
 
-classifier = load_model()
-
-# Streamlit UI
-st.title("📰 Fake News Detection")
-text = st.text_area("Enter a news statement or claim:", height=200)
-
-if st.button("Check"):
-    with st.spinner("Analyzing..."):
-        result = classifier(text)[0]
-        label = result['label']
-        score = result['score']
+generator = load_model()
 
-        st.write("🔎 Raw label:", label)  # Debug print
+st.title("🧠 DeepSeek QA (Generation)")
+text = st.text_area("Ask a coding or general question:", height=200)
 
-        if label == "LABEL_1":
-            verdict = "Real"
-            emoji = ""
-        elif label == "LABEL_0":
-            verdict = "Fake"
-            emoji = "❌"
-        else:
-            verdict = f"Unclear ({label})"
-            emoji = "🤔"
+if st.button("Generate Answer"):
+    with st.spinner("Generating..."):
+        prompt = f"Question: {text}\nAnswer:"
+        output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
+        answer = output.split("Answer:")[-1].strip()
 
-        st.subheader("Prediction")
-        st.success(f"{emoji} The statement is likely: **{verdict}** (confidence: {score:.2f})")
+        st.subheader("Generated Answer")
+        st.success(answer)
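Note on the new code path: `merge_and_unload()` folds the LoRA adapter weights into the base model and discards the PEFT wrappers, so the object handed to `pipeline()` behaves like a plain transformers model. The app then recovers the answer by splitting the generated text on "Answer:", which can pick the wrong span if the model repeats that marker in its output. A minimal alternative sketch, assuming the cached `generator` pipeline returned by `load_model()` above; `return_full_text=False` is a standard transformers text-generation pipeline option that strips the prompt from the output:

# Sketch: let the pipeline drop the prompt instead of string-splitting.
# Assumes `generator` is the text-generation pipeline from load_model().
def generate_answer(question: str, max_new_tokens: int = 100) -> str:
    prompt = f"Question: {question}\nAnswer:"
    result = generator(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=False,
        return_full_text=False,  # return only the newly generated tokens
    )
    return result[0]["generated_text"].strip()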
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-transformers>=4.38.0
-torch>=2.1.0
-peft
-streamlit
+transformers==4.40.1
+torch==2.2.2
+peft==0.9.0
+streamlit==1.34.0
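The dependency change replaces open-ended lower bounds with exact pins, so the Space resolves the same environment on every rebuild. A small sanity-check sketch (a hypothetical helper, not part of this commit) that fails fast if the installed packages drift from the pins:

# Hypothetical startup check: assert installed versions match requirements.txt.
from importlib.metadata import version

PINS = {
    "transformers": "4.40.1",
    "torch": "2.2.2",
    "peft": "0.9.0",
    "streamlit": "1.34.0",
}

for package, expected in PINS.items():
    installed = version(package)
    assert installed == expected, f"{package}: pinned {expected}, found {installed}"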