ZOTHEOS committed on
Commit
ac5a3a1
Β·
verified Β·
1 Parent(s): c296b5a

Update modules/config_settings_public.py

Browse files
Files changed (1) hide show
  1. modules/config_settings_public.py +22 -28
modules/config_settings_public.py CHANGED
@@ -1,41 +1,34 @@
1
- # FILE: modules/config_settings_public.py (Hugging Face Demo - v2.0 - Verified Models)
2
 
3
  import os
4
  import logging
5
  from huggingface_hub import hf_hub_download
6
 
7
- logger = logging.getLogger("ZOTHEOS_Config")
8
- if not logger.handlers:
9
- handler = logging.StreamHandler()
10
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s] - %(message)s')
11
- handler.setFormatter(formatter)
12
- logger.addHandler(handler)
13
- logger.setLevel(logging.INFO)
14
 
15
- # --- βœ… VERIFIED WEB-OPTIMIZED MODEL SOURCES ---
16
- # These models are smaller, faster.
17
  MODEL_DEFINITIONS = {
18
  "mistral": {
19
- "repo_id": "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
20
- "filename": "mistral-7b-instruct-v0.2.Q2_K.gguf" # Smallest quantization for speed
21
  },
22
  "gemma": {
23
- "repo_id": "google/gemma-7b-it.gguf", # Using the official Google repository
24
- "filename": "gemma-7b-it.gguf"
25
  },
26
  "qwen": {
27
- "repo_id": "second-state/Qwen1.5-1.8B-Chat-Q2_K.gguf",
28
- "filename": "Qwen1.5-1.8B-Chat-Q2_K.gguf"
29
  }
30
  }
31
 
32
  MODEL_PATHS = {}
33
 
34
- logger.info("βœ…βœ…βœ… RUNNING IN WEB DEMO MODE (Hugging Face Space) βœ…βœ…βœ…")
35
  N_GPU_LAYERS_FALLBACK = 0 # Force CPU-only mode
36
 
37
  for name, model_info in MODEL_DEFINITIONS.items():
38
- logger.info(f"Downloading model for demo: {name} from {model_info['repo_id']}")
39
  try:
40
  MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
41
  logger.info(f"βœ… Successfully downloaded {name}")
@@ -43,12 +36,13 @@ for name, model_info in MODEL_DEFINITIONS.items():
43
  logger.error(f"❌ FAILED to download {name}: {e}")
44
  raise e
45
 
46
- # --- WEB-OPTIMIZED MODEL PARAMETERS ---
47
  MODEL_SPECIFIC_PARAMS = {
48
  "_default": {
49
  "n_gpu_layers": N_GPU_LAYERS_FALLBACK,
50
- "n_ctx": 2048, # Smaller context for lower RAM
51
- "n_batch": 512,
 
52
  "verbose": True
53
  },
54
  "mistral": { "chat_format": "mistral-instruct" },
@@ -56,15 +50,15 @@ MODEL_SPECIFIC_PARAMS = {
56
  "qwen": { "chat_format": "chatml" }
57
  }
58
 
59
- # --- AGI-TIER INFERENCE & PROMPTS ---
60
- INFERENCE_PRESETS = {"balanced": {"temperature": 0.7, "max_tokens": 512}}
61
  DEFAULT_INFERENCE_PRESET = "balanced"
62
- DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI. Respond only in English."
63
  MODEL_ROLES = {"mistral": "analyst", "gemma": "humanist", "qwen": "skeptic"}
64
  MODEL_ROLE_SYSTEM_PROMPTS = {
65
- "analyst": "You are an impartial analyst. Provide structured, logical insights. Respond only in English.",
66
- "humanist": "You are an empathetic AI. Focus on the emotional and ethical impact. Respond only in English.",
67
- "skeptic": "You are a respectful skeptic. Question assumptions and highlight risks. Respond only in English.",
68
  }
69
 
70
- logger.info("βœ… Hugging Face Demo Configuration Loaded Successfully.")
 
1
+ # FILE: modules/config_settings_public.py (HF Demo - v8.0)
2
 
3
  import os
4
  import logging
5
  from huggingface_hub import hf_hub_download
6
 
7
# Module logger. Without an explicit handler/level, logging falls back to the
# last-resort stderr handler at WARNING+, so every logger.info(...) below would
# be silently dropped. Guarded setup (as in the previous revision) keeps the
# demo's progress messages visible without duplicating handlers on re-import.
logger = logging.getLogger("ZOTHEOS_Config_HF")
if not logger.handlers:
    _handler = logging.StreamHandler()
    _handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(_handler)
    logger.setLevel(logging.INFO)
 
 
 
 
 
 
8
 
9
+ # --- βœ… DEFINITIVE & VERIFIED WEB-OPTIMIZED MODEL SOURCES ---
 
10
  MODEL_DEFINITIONS = {
11
  "mistral": {
12
+ "repo_id": "TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
13
+ "filename": "mistral-7b-instruct-v0.2.Q2_K.gguf"
14
  },
15
  "gemma": {
16
+ "repo_id": "TheBloke/gemma-2b-it-GGUF",
17
+ "filename": "gemma-2b-it.Q2_K.gguf"
18
  },
19
  "qwen": {
20
+ "repo_id": "Qwen/Qwen1.5-0.5B-Chat-GGUF",
21
+ "filename": "qwen1_5-0.5b-chat-q2_k.gguf"
22
  }
23
  }
24
 
25
# Local cache paths for each downloaded model, keyed like MODEL_DEFINITIONS.
MODEL_PATHS = {}

logger.info("✅✅✅ RUNNING IN WEB DEMO MODE (True Fusion - CPU Survival) ✅✅✅")
N_GPU_LAYERS_FALLBACK = 0 # Force CPU-only mode

# Eagerly fetch every demo model at import time so downstream code can rely on
# MODEL_PATHS being fully populated. A failed download aborts module import.
for name, model_info in MODEL_DEFINITIONS.items():
    logger.info(f"Downloading demo model: {name}...")
    try:
        MODEL_PATHS[name] = hf_hub_download(repo_id=model_info["repo_id"], filename=model_info["filename"])
        logger.info(f"✅ Successfully downloaded {name}")
    except Exception as e:  # NOTE(review): except clause hidden in the diff context — assumed `Exception`; confirm against repo
        logger.error(f"❌ FAILED to download {name}: {e}")
        raise  # bare raise keeps the original traceback intact (was `raise e`)
38
 
39
+ # --- βœ… CPU-OPTIMIZED MODEL PARAMETERS ---
40
  MODEL_SPECIFIC_PARAMS = {
41
  "_default": {
42
  "n_gpu_layers": N_GPU_LAYERS_FALLBACK,
43
+ "n_ctx": 1024,
44
+ "n_batch": 256,
45
+ "n_threads": 4,
46
  "verbose": True
47
  },
48
  "mistral": { "chat_format": "mistral-instruct" },
 
50
  "qwen": { "chat_format": "chatml" }
51
  }
52
 
53
# --- TIER INFERENCE & PROMPTS ---
# Single "balanced" preset; max_tokens kept small for the CPU-only demo.
INFERENCE_PRESETS = {"balanced": {"temperature": 0.7, "max_tokens": 256}}
DEFAULT_INFERENCE_PRESET = "balanced"

# System prompt used when no per-role prompt applies.
DEFAULT_SYSTEM_PROMPT = "You are ZOTHEOS, an ethical AI. Respond concisely and only in English."

# Which fusion role each model plays, and the prompt driving that role.
MODEL_ROLES = {"mistral": "analyst", "gemma": "humanist", "qwen": "skeptic"}
_ROLE_PROMPT_PAIRS = (
    ("analyst", "You are an analyst. Be logical. Respond only in English."),
    ("humanist", "You are a humanist. Focus on values. Respond only in English."),
    ("skeptic", "You are a skeptic. Challenge the premise. Respond only in English."),
)
MODEL_ROLE_SYSTEM_PROMPTS = dict(_ROLE_PROMPT_PAIRS)
63
 
64
+ logger.info("βœ… Hugging Face Demo (CPU Survival Mode) Configuration Loaded.")