# app.py
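# Streamlit app: loads a DeepSeek-Coder base model, applies a fine-tuned LoRA adapter
# with PEFT, and exposes a simple question-answering UI backed by a text-generation pipeline.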
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel
import torch

@st.cache_resource
def load_model():
    # The adapter targets the 5.7bmqa base; the original "5.7b-base" id is not a published checkpoint.
    base_model = "deepseek-ai/deepseek-coder-5.7bmqa-base"
    adapter_path = "faizabenatmane/deepseek-coder-5.7bmqa-finetuned"
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    # Load the base model in half precision; device_map="auto" places it on available devices.
    base = AutoModelForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16, device_map="auto")
    # Attach the LoRA adapter, then merge it into the base weights so it behaves like a plain model.
    model = PeftModel.from_pretrained(base, adapter_path)
    model = model.merge_and_unload()
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipe
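
# Loaded once per session: st.cache_resource memoizes the pipeline across Streamlit reruns.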
generator = load_model()
st.title("🧠 DeepSeek QA (Generation)")
text = st.text_area("Ask a coding or general question:", height=200)
if st.button("Generate Answer"):
    with st.spinner("Generating..."):
        # Greedy decoding (do_sample=False) keeps the answer deterministic.
        prompt = f"Question: {text}\nAnswer:"
        output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
        # The pipeline echoes the prompt, so keep only the text after "Answer:".
        answer = output.split("Answer:")[-1].strip()
    st.subheader("Generated Answer")
    st.success(answer)