Update app2.py
app2.py CHANGED
@@ -25,15 +25,8 @@ MODELS = {
         "base_url": "https://api.deepseek.com",
         "env_key": "DEEPSEEK_API_KEY",
         "model_name_for_api": "deepseek-chat", # Use the specific model name required by DeepSeek API
-    }
-
-        "base_url": "https://api-inference.huggingface.co/v1/", # Check if correct for chat completions
-        "env_key": "HF_TOKEN",
-        # Note: HF Inference API might use a different endpoint or format for chat completions.
-        # This base URL might be for text-generation. Adjust if needed.
-        # Also, the model name might need /chat/completions appended or similar.
-        "model_name_for_api": "Qwen/Qwen2.5-Coder-32B-Instruct", # Usually the model ID on HF
-    },
+    }
+
     # Example using a local server (like LM Studio, Ollama)
     # "Local Model (via Ollama)": {
     #     "base_url": "http://localhost:11434/v1", # Ollama's OpenAI-compatible endpoint
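The commit drops the dangling Hugging Face Inference API fields (left without an enclosing dict key) and keeps the DeepSeek entry plus the commented-out Ollama example. Each remaining entry follows the OpenAI-compatible client convention: a "base_url", the name of the environment variable holding the API key ("env_key"), and the model identifier to send on the wire ("model_name_for_api"). Below is a minimal sketch of how app2.py presumably consumes such an entry, assuming the official openai 1.x Python SDK; the helper name get_client and the "DeepSeek" key are illustrative, not taken from the file.

import os

from openai import OpenAI  # openai 1.x SDK; all backends here expose OpenAI-compatible APIs


def get_client(cfg: dict) -> OpenAI:
    """Hypothetical helper: build a client from one MODELS entry."""
    # Local servers such as Ollama ignore the key, so fall back to a dummy
    # value when the env var named by "env_key" is not set.
    api_key = os.environ.get(cfg["env_key"], "not-needed")
    return OpenAI(base_url=cfg["base_url"], api_key=api_key)


# Illustrative usage with the DeepSeek entry this commit keeps
# (the "DeepSeek" key name is an assumption, not shown in the diff):
# cfg = MODELS["DeepSeek"]
# client = get_client(cfg)
# reply = client.chat.completions.create(
#     model=cfg["model_name_for_api"],  # "deepseek-chat"
#     messages=[{"role": "user", "content": "Hello"}],
# )
# print(reply.choices[0].message.content)

Routing every backend through one OpenAI-compatible client, whether hosted DeepSeek or a local Ollama server on http://localhost:11434/v1, is what lets a flat MODELS dict like this serve as the app's entire provider configuration.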