Update modules/config_settings_public.py
modules/config_settings_public.py
CHANGED
@@ -1,41 +1,34 @@
-# FILE: modules/config_settings_public.py (
+# FILE: modules/config_settings_public.py (HF Demo - v8.0)
 
 import os
 import logging
 from huggingface_hub import hf_hub_download
 
-logger = logging.getLogger("
-if not logger.handlers:
-    handler = logging.StreamHandler()
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s] - %(message)s')
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.INFO)
+logger = logging.getLogger("ZOTHEOS_Config_HF")
 
-# --- ✅ VERIFIED WEB-OPTIMIZED MODEL SOURCES ---
-# These models are smaller, faster.
+# --- ✅ DEFINITIVE & VERIFIED WEB-OPTIMIZED MODEL SOURCES ---
 MODEL_DEFINITIONS = {
     "mistral": {
-        "repo_id": "TheBloke/Mistral-7B-Instruct-v0.
-        "filename": "mistral-7b-instruct-v0.2.Q2_K.gguf"
+        "repo_id": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
+        "filename": "mistral-7b-instruct-v0.2.Q2_K.gguf"
     },
     "gemma": {
-        "repo_id": "
-        "filename": "gemma-
+        "repo_id": "TheBloke/gemma-2b-it-GGUF",
+        "filename": "gemma-2b-it.Q2_K.gguf"
     },
     "qwen": {
-        "repo_id": "
-        "filename": "
+        "repo_id": "Qwen/Qwen1.5-0.5B-Chat-GGUF",
+        "filename": "qwen1_5-0.5b-chat-q2_k.gguf"
     }
 }
 
 MODEL_PATHS = {}
 
-logger.info("✅✅✅ RUNNING IN WEB DEMO MODE (
+logger.info("✅✅✅ RUNNING IN WEB DEMO MODE (True Fusion - CPU Survival) ✅✅✅")
 N_GPU_LAYERS_FALLBACK = 0 # Force CPU-only mode
 
 for name, model_info in MODEL_DEFINITIONS.items():
-    logger.info(f"Downloading model
+    logger.info(f"Downloading demo model: {name}...")
     try:
         MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
         logger.info(f"✅ Successfully downloaded {name}")
@@ -43,12 +36,13 @@ for name, model_info in MODEL_DEFINITIONS.items():
         logger.error(f"❌ FAILED to download {name}: {e}")
         raise e
 
-# ---
+# --- ✅ CPU-OPTIMIZED MODEL PARAMETERS ---
 MODEL_SPECIFIC_PARAMS = {
     "_default": {
         "n_gpu_layers": N_GPU_LAYERS_FALLBACK,
-        "n_ctx":
-        "n_batch":
+        "n_ctx": 1024,
+        "n_batch": 256,
+        "n_threads": 4,
         "verbose": True
     },
     "mistral": { "chat_format": "mistral-instruct" },
@@ -56,15 +50,15 @@ MODEL_SPECIFIC_PARAMS = {
     "qwen": { "chat_format": "chatml" }
 }
 
-# ---
-INFERENCE_PRESETS = {"balanced": {"temperature": 0.7, "max_tokens":
+# --- TIER INFERENCE & PROMPTS ---
+INFERENCE_PRESETS = {"balanced": {"temperature": 0.7, "max_tokens": 256}}
 DEFAULT_INFERENCE_PRESET = "balanced"
-DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI. Respond only in English."
+DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI. Respond concisely and only in English."
 MODEL_ROLES = {"mistral": "analyst", "gemma": "humanist", "qwen": "skeptic"}
 MODEL_ROLE_SYSTEM_PROMPTS = {
-    "analyst": "You are an
-    "humanist": "You are
-    "skeptic": "You are a
+    "analyst": "You are an analyst. Be logical. Respond only in English.",
+    "humanist": "You are a humanist. Focus on values. Respond only in English.",
+    "skeptic": "You are a skeptic. Challenge the premise. Respond only in English.",
 }
 
-logger.info("✅ Hugging Face Demo Configuration Loaded
+logger.info("✅ Hugging Face Demo (CPU Survival Mode) Configuration Loaded.")
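For context on how these values fit together: this commit only defines the configuration, so the loader below is a hypothetical sketch, assuming the app drives the downloaded GGUF files through llama-cpp-python (the natural consumer of n_gpu_layers, n_ctx, n_batch, n_threads, and chat_format). The helper names load_model and ask are illustrative, not part of this repo.

# Hypothetical usage sketch (not part of this commit). Assumes llama-cpp-python;
# load_model and ask are illustrative names only.
from llama_cpp import Llama

# Importing the config module runs the hf_hub_download loop as a side effect.
from modules.config_settings_public import (
    MODEL_PATHS, MODEL_SPECIFIC_PARAMS,
    INFERENCE_PRESETS, DEFAULT_INFERENCE_PRESET,
    MODEL_ROLES, MODEL_ROLE_SYSTEM_PROMPTS,
)

def load_model(name: str) -> Llama:
    # Merge the shared CPU-survival defaults with per-model overrides
    # (e.g. chat_format); per-model keys win on conflict.
    params = {**MODEL_SPECIFIC_PARAMS["_default"], **MODEL_SPECIFIC_PARAMS.get(name, {})}
    return Llama(model_path=MODEL_PATHS[name], **params)

def ask(model: Llama, name: str, question: str) -> str:
    # Each model answers in its assigned role (analyst / humanist / skeptic).
    preset = INFERENCE_PRESETS[DEFAULT_INFERENCE_PRESET]
    response = model.create_chat_completion(
        messages=[
            {"role": "system", "content": MODEL_ROLE_SYSTEM_PROMPTS[MODEL_ROLES[name]]},
            {"role": "user", "content": question},
        ],
        **preset,  # temperature=0.7, max_tokens=256
    )
    return response["choices"][0]["message"]["content"]

mistral = load_model("mistral")
print(ask(mistral, "mistral", "Is a 2-bit quant good enough for a demo?"))

On the design: keeping the CPU knobs (n_gpu_layers=0, n_ctx=1024, n_batch=256, n_threads=4) in "_default" means each model entry only has to declare its chat template, and passing chat_format to Llama lets create_chat_completion apply the right prompt template without manual string building.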