Update modules/config_settings_public.py
modules/config_settings_public.py (changed)
@@ -16,15 +16,15 @@ if not logger.handlers:
 # These models are smaller, faster.
 MODEL_DEFINITIONS = {
     "mistral": {
-        "repo_id": "TheBloke/Mistral-7B-Instruct-v0.
+        "repo_id": "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
         "filename": "mistral-7b-instruct-v0.2.Q2_K.gguf" # Smallest quantization for speed
     },
     "gemma": {
-        "repo_id": "google/gemma-
+        "repo_id": "google/gemma-7b-it.gguf", # Using the official Google repository
         "filename": "gemma-7b-it.gguf"
     },
     "qwen": {
-        "repo_id": "second-state/Qwen1.5-1.8B-Chat-
+        "repo_id": "second-state/Qwen1.5-1.8B-Chat-Q2_K.gguf",
         "filename": "Qwen1.5-1.8B-Chat-Q2_K.gguf"
     }
 }
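For context, here is a minimal sketch of how these MODEL_DEFINITIONS entries could be consumed at load time. The hf_hub_download call is the standard huggingface_hub API; the load_model() wrapper and the llama_cpp loading step are assumptions for illustration and are not part of the committed file.

# Hypothetical usage sketch (assumes huggingface_hub and llama-cpp-python are installed;
# load_model() is illustrative and not defined in this repository).
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

from modules.config_settings_public import MODEL_DEFINITIONS

def load_model(name: str) -> Llama:
    # Look up the configured repo/file pair and fetch the GGUF weights from the Hub.
    spec = MODEL_DEFINITIONS[name]
    model_path = hf_hub_download(repo_id=spec["repo_id"], filename=spec["filename"])
    # Load the quantized model with llama.cpp; n_ctx=2048 is an assumed context size.
    return Llama(model_path=model_path, n_ctx=2048)

llm = load_model("mistral")

Under this reading, repo_id needs to be a full Hugging Face repository ID and filename a GGUF file inside that repository, which is consistent with the commit filling in complete repository names.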