Commit · 300bedb
1 Parent(s): d8c4d96
Fix: update dependencies,deepseek

Files changed: app.py (+15 -33), requirements.txt (+4 -4)
app.py CHANGED
@@ -1,50 +1,32 @@
 import streamlit as st
-from transformers import …
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from peft import PeftModel
 import torch
 
 @st.cache_resource
 def load_model():
-    base_model = "…
-    adapter_path = "faizabenatmane/…
+    base_model = "deepseek-ai/deepseek-coder-5.7b-base"
+    adapter_path = "faizabenatmane/deepseek-coder-5.7bmqa-finetuned"
 
     tokenizer = AutoTokenizer.from_pretrained(base_model)
-
-    base = AutoModelForSequenceClassification.from_pretrained(
-        base_model,
-        num_labels=2,
-        device_map="cpu"
-    )
+    base = AutoModelForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16, device_map="auto")
 
     model = PeftModel.from_pretrained(base, adapter_path)
    model = model.merge_and_unload()
 
-    pipe = pipeline("text-…
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
     return pipe
 
-
-
-# Streamlit UI
-st.title("📰 Fake News Detection")
-text = st.text_area("Enter a news statement or claim:", height=200)
-
-if st.button("Check"):
-    with st.spinner("Analyzing..."):
-        result = classifier(text)[0]
-        label = result['label']
-        score = result['score']
+generator = load_model()
 
-
+st.title("🧠 DeepSeek QA (Generation)")
+text = st.text_area("Ask a coding or general question:", height=200)
 
-
-
-
-
-
-        emoji = "❌"
-    else:
-        verdict = f"Unclear ({label})"
-        emoji = "🤔"
+if st.button("Generate Answer"):
+    with st.spinner("Generating..."):
+        prompt = f"Question: {text}\nAnswer:"
+        output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
+        answer = output.split("Answer:")[-1].strip()
 
-    st.subheader("…
-    st.success(…
+        st.subheader("Generated Answer")
+        st.success(answer)
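For reference, the loading pattern introduced above is the usual PEFT adapter workflow: load the fp16 base model, attach the fine-tuned adapter, merge the adapter weights into the base, and wrap the merged model in a text-generation pipeline. A minimal standalone sketch of that flow, reusing the repo IDs exactly as written in the diff and leaving Streamlit out (a smoke test, not part of this commit; the prompt below is arbitrary):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel

base_model = "deepseek-ai/deepseek-coder-5.7b-base"               # base repo ID as written in the diff
adapter_path = "faizabenatmane/deepseek-coder-5.7bmqa-finetuned"  # adapter repo ID as written in the diff

tokenizer = AutoTokenizer.from_pretrained(base_model)
base = AutoModelForCausalLM.from_pretrained(
    base_model, torch_dtype=torch.float16, device_map="auto"
)

# Attach the LoRA adapter, then fold its weights into the base model so
# inference runs through plain transformers without PEFT hooks.
model = PeftModel.from_pretrained(base, adapter_path)
model = model.merge_and_unload()

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
prompt = "Question: Write a Python function that reverses a string.\nAnswer:"
print(generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"])

A 5.7B-parameter model in float16 needs roughly 11-12 GB of memory, so on a CPU-only Space this path is slow; with device_map="auto", accelerate falls back to CPU when no GPU is available.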
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-transformers
-torch
-peft
-streamlit
+transformers==4.40.1
+torch==2.2.2
+peft==0.9.0
+streamlit==1.34.0
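Since the commit pins exact versions, a hypothetical sanity check (not part of the commit) that the running environment matches the pins in requirements.txt could look like:

from importlib.metadata import version

# Pins copied from requirements.txt in this commit.
pins = {
    "transformers": "4.40.1",
    "torch": "2.2.2",
    "peft": "0.9.0",
    "streamlit": "1.34.0",
}

for pkg, pinned in pins.items():
    installed = version(pkg)
    # Note: torch may report a local suffix such as "2.2.2+cpu",
    # which this strict comparison would flag as a mismatch.
    status = "OK" if installed == pinned else "MISMATCH"
    print(f"{pkg}: installed {installed}, pinned {pinned} -> {status}")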