mobinln committed · Commit 7dc745f · verified · 1 Parent(s): f14ab1b

Update app.py

Files changed (1): app.py (+1 -1)
app.py CHANGED
@@ -33,7 +33,7 @@ def respond(
 
     try:
         stream = client.chat.completions.create(
-            model="qwen3",  # ⚠️ Replace it with the name of the model loaded by your llama.cpp
+            model="Deepseek-R1-0528-Qwen3-8B",  # ⚠️ Replace it with the name of the model loaded by your llama.cpp
             messages=messages,
             max_tokens=max_tokens,
             temperature=temperature,
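
For context, the changed line sits inside a streaming chat handler that talks to a llama.cpp server through an OpenAI-compatible client. Below is a minimal sketch of how such a handler is typically wired up; the base_url, api_key, history format, top_p parameter, and error handling are illustrative assumptions, not code taken from this repository.

from openai import OpenAI

# Assumed llama.cpp server running locally with the OpenAI-compatible API enabled.
client = OpenAI(
    base_url="http://localhost:8080/v1",  # assumption: adjust to your llama.cpp endpoint
    api_key="sk-no-key-required",         # llama.cpp ignores the key, but the client requires one
)

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the conversation in the OpenAI chat message format.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = ""
    try:
        stream = client.chat.completions.create(
            model="Deepseek-R1-0528-Qwen3-8B",  # ⚠️ Replace it with the name of the model loaded by your llama.cpp
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        )
        # Yield partial text as chunks arrive so the UI can render the reply incrementally.
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                response += delta
                yield response
    except Exception as e:
        yield f"Error: {e}"

The commit itself only swaps the hard-coded model name from "qwen3" to "Deepseek-R1-0528-Qwen3-8B"; as the inline comment notes, this string must match whatever model your llama.cpp instance has loaded.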