# Spaces:
# Sleeping
# Sleeping
import os
from dataclasses import dataclass  # NOTE(review): imported but unused here — confirm before removing
from pathlib import Path
from typing import Optional

from dotenv import load_dotenv

# Pull configuration from a local .env file into os.environ before the
# Settings class below reads it at class-definition time.
load_dotenv()
class Settings:
    """Application-wide configuration settings.

    All attributes are class-level defaults read from the environment once,
    at class-definition time (a .env file is loaded by the module before
    this class is defined). Instantiating Settings additionally ensures the
    working directories (temp, config, logs) exist.
    """

    # LLM Provider settings ("auto" resolves to huggingface/openai at runtime)
    llm_provider: str = os.getenv("LLM_PROVIDER", "auto")

    # Hugging Face settings
    hf_token: str = os.getenv("HF_TOKEN", "")
    hf_chat_model: str = os.getenv("HF_CHAT_MODEL", "Qwen/Qwen2.5-7B-Instruct")
    hf_temperature: float = 0.001
    hf_max_new_tokens: int = 512

    # Model settings
    model_name: str = os.getenv("MODEL_NAME", "Qwen/Qwen2.5-7B-Instruct")

    # OpenAI fallback settings.
    # Bug fix: llm_endpoint()/llm_api_key() reference these attributes, but
    # they were never defined, so the OpenAI path raised AttributeError.
    # The endpoint default is the standard public OpenAI base URL — confirm
    # against the deployment if a proxy/gateway is used.
    openai_api_key: str = os.getenv("OPENAI_API_KEY", "")
    openai_endpoint: str = os.getenv("OPENAI_ENDPOINT", "https://api.openai.com/v1")

    # Audio provider settings
    audio_provider: str = os.getenv("AUDIO_PROVIDER", "auto")
    tts_model: str = os.getenv("TTS_MODEL", "canopylabs/orpheus-3b-0.1-ft")
    stt_model: str = os.getenv("STT_MODEL", "openai/whisper-large-v3")

    # Screen sharing settings
    screen_capture_interval: float = float(os.getenv("SCREEN_CAPTURE_INTERVAL", "1.0"))
    screen_compression_quality: int = int(os.getenv("SCREEN_COMPRESSION_QUALITY", "50"))
    max_width: int = int(os.getenv("SCREEN_MAX_WIDTH", "3440"))
    max_height: int = int(os.getenv("SCREEN_MAX_HEIGHT", "1440"))

    # Nebius settings
    NEBIUS_MODEL: str = os.getenv("NEBIUS_MODEL", "google/gemma-3-27b-it")
    NEBIUS_API_KEY: str = os.getenv("NEBIUS_API_KEY", "Not found")
    NEBIUS_BASE_URL: str = os.getenv("NEBIUS_BASE_URL", "https://api.studio.nebius.com/v1/")

    # Hyper-V settings
    hyperv_enabled: bool = os.getenv("HYPERV_ENABLED", "false").lower() == "true"
    hyperv_host: str = os.getenv("HYPERV_HOST", "localhost")
    hyperv_username: Optional[str] = os.getenv("HYPERV_USERNAME")
    hyperv_password: Optional[str] = os.getenv("HYPERV_PASSWORD")

    # Application settings
    max_conversation_history: int = int(os.getenv("MAX_CONVERSATION_HISTORY", "50"))
    temp_dir: str = os.getenv("TEMP_DIR", "./temp")
    log_level: str = os.getenv("LOG_LEVEL", "INFO")

    def __init__(self) -> None:
        # Bug fix: __post_init__ is a dataclasses hook and Settings is not a
        # @dataclass, so it was never invoked automatically. Call it here so
        # the working directories actually get created on instantiation.
        self.__post_init__()

    def __post_init__(self) -> None:
        """Ensure the directories the application writes to exist."""
        Path(self.temp_dir).mkdir(exist_ok=True, parents=True)
        Path("./config").mkdir(exist_ok=True, parents=True)
        Path("./logs").mkdir(exist_ok=True, parents=True)

    def is_hf_token_valid(self) -> bool:
        """Mocked for Hugging Face Spaces: always report the token as valid."""
        return True

    def effective_llm_provider(self) -> str:
        """Resolve "auto" to a concrete LLM provider name."""
        if self.llm_provider == "auto":
            return "huggingface" if self.is_hf_token_valid() else "openai"
        return self.llm_provider

    def effective_audio_provider(self) -> str:
        """Resolve "auto" to a concrete audio provider name."""
        if self.audio_provider == "auto":
            return "huggingface" if self.is_hf_token_valid() else "openai"
        return self.audio_provider

    def llm_endpoint(self) -> str:
        """Return the chat endpoint URL for the effective LLM provider."""
        # Bug fix: the resolver must be *called* — comparing the bound
        # method object to a string was always False, so the Hugging Face
        # branch was unreachable.
        if self.effective_llm_provider() == "huggingface":
            return f"https://api-inference.huggingface.co/models/{self.hf_chat_model}"
        return self.openai_endpoint

    def llm_api_key(self) -> str:
        """Return the API key matching the effective LLM provider."""
        # Bug fix: call effective_llm_provider() (was a method-vs-string
        # comparison, always False).
        if self.effective_llm_provider() == "huggingface":
            return self.hf_token
        return self.openai_api_key

    def effective_model_name(self) -> str:
        """Return the model identifier matching the effective LLM provider."""
        # Bug fix: call effective_llm_provider() (was a method-vs-string
        # comparison, always False).
        if self.effective_llm_provider() == "huggingface":
            return self.hf_chat_model
        return self.model_name