import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from peft import PeftModel
import torch


@st.cache_resource
def load_model():
    # The adapter name points at the 5.7B MQA variant of DeepSeek-Coder.
    base_model = "deepseek-ai/deepseek-coder-5.7bmqa-base"
    adapter_path = "faizabenatmane/deepseek-coder-5.7bmqa-finetuned"

    tokenizer = AutoTokenizer.from_pretrained(base_model)
    base = AutoModelForCausalLM.from_pretrained(
        base_model, torch_dtype=torch.float16, device_map="auto"
    )
    # Attach the LoRA adapter, then merge its weights into the base model
    # so generation runs as a single plain transformers model.
    model = PeftModel.from_pretrained(base, adapter_path)
    model = model.merge_and_unload()
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipe

generator = load_model()

st.title("🧠 DeepSeek QA (Generation)")
text = st.text_area("Ask a coding or general question:", height=200)
if st.button("Generate Answer"):
    with st.spinner("Generating..."):
        prompt = f"Question: {text}\nAnswer:"
        output = generator(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
        # The pipeline echoes the prompt, so keep only the text after the final "Answer:".
        answer = output.split("Answer:")[-1].strip()
    st.subheader("Generated Answer")
    st.success(answer)
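
# Minimal sketch of how to run this Space locally. The package list is an
# assumption, not pinned by this file; `accelerate` is included because
# device_map="auto" relies on it:
#
#   pip install streamlit torch transformers peft accelerate
#   streamlit run app.py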