Update modules/config_settings_public.py
modules/config_settings_public.py  +102 -143  CHANGED
@@ -1,143 +1,102 @@
-# FILE: modules/config_settings_public.py
-
-import os
-import sys
-import logging
-from huggingface_hub import hf_hub_download
-
-logger = logging.getLogger("ZOTHEOS_Config")
-if not logger.handlers:
-    handler = logging.StreamHandler(sys.stdout)
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s] - %(message)s')
-    handler.setFormatter(formatter)
-    logger.addHandler(handler)
-    logger.setLevel(logging.INFO)
-
-# ---
-        "n_ctx": N_CTX_FALLBACK
-    }
-}
-
-INFERENCE_PRESETS = {
-    "balanced": {"temperature": 0.7, "top_p": 0.9, "top_k": 40, "repeat_penalty": 1.1, "mirostat_mode": 0, "max_tokens": 1024},
-    "precise": {"temperature": 0.2, "top_p": 0.7, "top_k": 20, "repeat_penalty": 1.05, "mirostat_mode": 0, "max_tokens": 1536},
-    "creative": {"temperature": 0.9, "top_p": 0.95, "top_k": 60, "repeat_penalty": 1.15, "mirostat_mode": 2, "mirostat_tau": 4.0, "mirostat_eta": 0.1, "max_tokens": 1024},
-    "passthrough": {}
-}
-DEFAULT_INFERENCE_PRESET = "balanced"
-
-DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI developed to help humanity. Provide clear, concise, and helpful responses. Be respectful and avoid harmful content."
-SYSTEM_PERSONAS = {
-    "default": DEFAULT_SYSTEM_PROMPT, "helpful_assistant": "You are a helpful AI assistant. Your goal is to provide accurate and informative answers.",
-    "philosopher": "You are an AI philosopher. Engage with complex questions thoughtfully and explore different perspectives.",
-    "coder": "You are an expert AI programmer. Provide code examples and explain them clearly. Assume a senior developer audience.",
-    "concise_summarizer": "You are an AI tasked with providing very concise summaries. Get straight to the point. Use bullet points where appropriate.",
-}
-
-MODEL_ROLES = { "mistral": "analyst", "gemma": "humanist", "qwen": "skeptic" }
-MODEL_ROLE_SYSTEM_PROMPTS = {
-    "analyst": "You are an impartial analyst. Focus on facts, clarity, and cause-effect logic. Provide structured, evidence-based reasoning.",
-    "humanist": "You are a human-centered assistant. Focus on emotion, empathy, ethical considerations, and the potential human impact or experience related to the query.",
-    "skeptic": "You are a critical evaluator and a respectful skeptic. Your role is to challenge assumptions, highlight potential risks, point out biases, and explore alternative or less obvious interpretations. Question the premises if necessary.",
-    "general": DEFAULT_SYSTEM_PROMPT
-}
-
-MODEL_WEIGHTS = { "mistral": 1.0, "gemma": 0.9, "qwen": 1.1 }
-LOG_LEVEL = "INFO"
-LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s] - %(message)s'
-
-ENV_VARS_TO_SET = {
-    "TRANSFORMERS_CACHE": CORE_DIRS_TO_VERIFY["cache_transformers"], "HF_HOME": CORE_DIRS_TO_VERIFY["cache_huggingface"],
-    "HF_HUB_CACHE": CORE_DIRS_TO_VERIFY["cache_hf_hub"], "TOKENIZERS_PARALLELISM": "false",
-}
-
-ZOTHEOS_VERSION = "Public Beta 1.4 (Web Enabled)"
-
-logger.info(f"Config settings loaded. Version: {ZOTHEOS_VERSION}")
-logger.info(f"APP_DIR: {APP_DIR} (Frozen: {_is_frozen}) | Web Mode: {IS_WEB_MODE}")
+# FILE: modules/config_settings_public.py (Corrected Version)
+
+import os
+import sys
+import logging
+from huggingface_hub import hf_hub_download
+
+logger = logging.getLogger("ZOTHEOS_Config")
+if not logger.handlers:
+    handler = logging.StreamHandler(sys.stdout)
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s] - %(message)s')
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+    logger.setLevel(logging.INFO)
+
+# --- Standard way to detect if we are running in a Hugging Face Space ---
+IS_WEB_MODE = "HF_SPACE_ID" in os.environ
+
+# --- Define Model Repositories and Filenames ONCE ---
+MODEL_DEFINITIONS = {
+    "mistral": {
+        "repo_id": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
+        "filename": "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+    },
+    "gemma": {
+        "repo_id": "google/gemma-2b-it-gguf",
+        "filename": "gemma-2b-it.Q4_K_M.gguf"
+    },
+    "qwen": {
+        "repo_id": "Qwen/Qwen1.5-1.8B-Chat-GGUF",
+        "filename": "qwen1.5-1.8b-chat.Q4_K_M.gguf"
+    }
+}
+
+# --- Initialize MODEL_PATHS dictionary ---
+MODEL_PATHS = {}
+
+# --- Set up paths and GPU layers based on environment ---
+if IS_WEB_MODE:
+    logger.info("RUNNING IN WEB MODE (Hugging Face Space)")
+    logger.info("Model paths will be resolved by hf_hub_download.")
+
+    for name, model_info in MODEL_DEFINITIONS.items():
+        logger.info(f"Downloading model: {name}")
+        MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
+
+    N_GPU_LAYERS_FALLBACK = 0
+    logger.info("N_GPU_LAYERS_FALLBACK forced to 0 for CPU-only web environment.")
+
+else: # LOCAL MODE
+    logger.info("RUNNING IN LOCAL MODE (Desktop/PC)")
+    _is_frozen = getattr(sys, 'frozen', False)
+    if _is_frozen:
+        APP_DIR = os.path.dirname(sys.executable)
+    else:
+        APP_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+    BASE_MODELS_DIR = os.path.join(APP_DIR, "models")
+    logger.info(f"Models will be loaded from local directory: {BASE_MODELS_DIR}")
+
+    for name, model_info in MODEL_DEFINITIONS.items():
+        MODEL_PATHS[name] = os.path.join(BASE_MODELS_DIR, model_info["filename"])
+
+    N_GPU_LAYERS_FALLBACK = -1
+    logger.info("N_GPU_LAYERS_FALLBACK set to -1 for local GPU acceleration.")
+
+
+# --- Shared Configurations (The rest of the file remains mostly the same) ---
+MAX_RAM_MODELS_GB = 23.8
+MAX_CONCURRENT_MODELS = 3
+N_CTX_FALLBACK = 2048
+N_THREADS_FALLBACK = 8
+VERBOSE_LLAMA_CPP = True
+
+MODEL_SPECIFIC_PARAMS = {
+    "mistral": { "chat_format": "mistral-instruct" },
+    "gemma": { "chat_format": "gemma" },
+    "qwen": { "chat_format": "chatml" },
+    "_default": {
+        "f16_kv": True, "use_mmap": True, "use_mlock": False,
+        "verbose": VERBOSE_LLAMA_CPP,
+        "n_gpu_layers": N_GPU_LAYERS_FALLBACK,
+        "n_threads": N_THREADS_FALLBACK,
+        "n_ctx": N_CTX_FALLBACK
+    }
+}
+
+INFERENCE_PRESETS = {
+    "balanced": {"temperature": 0.7, "top_p": 0.9, "top_k": 40, "repeat_penalty": 1.1, "mirostat_mode": 0, "max_tokens": 1024},
+    "precise": {"temperature": 0.2, "top_p": 0.7, "top_k": 20, "repeat_penalty": 1.05, "mirostat_mode": 0, "max_tokens": 1536},
+    "creative": {"temperature": 0.9, "top_p": 0.95, "top_k": 60, "repeat_penalty": 1.15, "mirostat_mode": 2, "mirostat_tau": 4.0, "mirostat_eta": 0.1, "max_tokens": 1024},
+    "passthrough": {}
+}
+DEFAULT_INFERENCE_PRESET = "balanced"
+
+# ... (The rest of your file: SYSTEM_PROMPTS, MODEL_ROLES, etc. can stay the same) ...
+
+DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI developed to help humanity. Provide clear, concise, and helpful responses. Be respectful and avoid harmful content."
+ZOTHEOS_VERSION = "Public Beta 1.5 (Web Live)"
+
+logger.info(f"Config settings loaded. Version: {ZOTHEOS_VERSION}")
+logger.info(f"Web Mode: {IS_WEB_MODE}")
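For context, a minimal sketch of how a loader might consume these settings follows, assuming the project loads its GGUF models with llama-cpp-python; the helper name load_model_for and the calling code are illustrative, not part of this commit.

from llama_cpp import Llama

from modules.config_settings_public import (
    MODEL_PATHS, MODEL_SPECIFIC_PARAMS, INFERENCE_PRESETS, DEFAULT_INFERENCE_PRESET,
)

def load_model_for(name: str) -> Llama:
    # Merge the shared "_default" block with any per-model overrides (e.g. chat_format).
    params = {**MODEL_SPECIFIC_PARAMS["_default"], **MODEL_SPECIFIC_PARAMS.get(name, {})}
    # Pass only the keys relied on here; support for extras such as f16_kv varies across llama-cpp-python releases.
    return Llama(
        model_path=MODEL_PATHS[name],
        n_ctx=params["n_ctx"],
        n_threads=params["n_threads"],
        n_gpu_layers=params["n_gpu_layers"],
        chat_format=params.get("chat_format"),
        verbose=params["verbose"],
    )

llm = load_model_for("qwen")
preset = INFERENCE_PRESETS[DEFAULT_INFERENCE_PRESET]
reply = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Explain the web/local split in one sentence."}],
    **preset,
)
print(reply["choices"][0]["message"]["content"])

The same MODEL_PATHS entry works in both branches: in web mode it is the cache path returned by hf_hub_download, while locally it is a file under BASE_MODELS_DIR.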
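Because IS_WEB_MODE is evaluated at import time, the web branch can also be exercised on a desktop by exporting the variable before the first import, as in the sketch below (module path as above; the value is a placeholder). One caveat worth verifying: Spaces runtimes commonly expose SPACE_ID, so confirm that HF_SPACE_ID is actually present in the target environment, since presence of that exact key is all the check tests.

import os

# Presence of the key is all the config checks; the value itself is not inspected.
os.environ["HF_SPACE_ID"] = "username/zotheos-demo"

# Importing now takes the web branch and eagerly downloads each GGUF via hf_hub_download
# (several GB on first run; later runs reuse the local Hugging Face cache).
import modules.config_settings_public as cfg

assert cfg.IS_WEB_MODE
print(cfg.MODEL_PATHS["mistral"])  # cache path resolved by hf_hub_download

Downloading eagerly keeps the rest of the code path identical in both modes, at the cost of a slower first start in the Space.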