Spaces:
Running
Running
Update modules/config_settings_public.py
Browse files
modules/config_settings_public.py
CHANGED
@@ -13,8 +13,8 @@ MODEL_DEFINITIONS = {
|
|
13 |
"filename": "mistral-7b-instruct-v0.2.Q2_K.gguf"
|
14 |
},
|
15 |
"gemma": {
|
16 |
-
"repo_id": "
|
17 |
-
"filename": "gemma-2b-it.
|
18 |
},
|
19 |
"qwen": {
|
20 |
"repo_id": "Qwen/Qwen1.5-0.5B-Chat-GGUF",
|
@@ -28,7 +28,7 @@ logger.info("✅✅✅ RUNNING IN WEB DEMO MODE (True Fusion - CPU Survival) ✅
|
|
28 |
N_GPU_LAYERS_FALLBACK = 0 # Force CPU-only mode
|
29 |
|
30 |
for name, model_info in MODEL_DEFINITIONS.items():
|
31 |
-
logger.info(f"Downloading demo model: {name}...")
|
32 |
try:
|
33 |
MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
|
34 |
logger.info(f"✅ Successfully downloaded {name}")
|
|
|
13 |
"filename": "mistral-7b-instruct-v0.2.Q2_K.gguf"
|
14 |
},
|
15 |
"gemma": {
|
16 |
+
"repo_id": "google/gemma-2b-it-gguf", # ✅ Using the OFFICIAL Google repository
|
17 |
+
"filename": "gemma-2b-it.gguf" # ✅ This is the correct filename
|
18 |
},
|
19 |
"qwen": {
|
20 |
"repo_id": "Qwen/Qwen1.5-0.5B-Chat-GGUF",
|
|
|
28 |
N_GPU_LAYERS_FALLBACK = 0 # Force CPU-only mode
|
29 |
|
30 |
for name, model_info in MODEL_DEFINITIONS.items():
|
31 |
+
logger.info(f"Downloading demo model: {name} from {model_info['repo_id']}...")
|
32 |
try:
|
33 |
MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
|
34 |
logger.info(f"✅ Successfully downloaded {name}")
|