#!/usr/bin/env python3
"""
Centralized configuration management for GAIA agent.

Groups model, tool, and UI settings, loads API keys from the environment
(via python-dotenv), and exposes a module-level ``config`` singleton.
"""
import os
from typing import Dict, Optional, Any
from dataclasses import dataclass, field
from enum import Enum
from dotenv import load_dotenv
class ModelType(Enum):
    """Closed set of model backends the agent can run against.

    Availability of each backend depends on the matching API key
    (see ``Config.get_available_models``).
    """
    KLUSTER = "kluster"   # Kluster.ai-hosted models (needs KLUSTER_API_KEY)
    GEMINI = "gemini"     # Google Gemini (needs GEMINI_API_KEY)
    QWEN = "qwen"         # Qwen via Hugging Face (needs HUGGINGFACE_TOKEN)
class AgentType(Enum):
    """Categories of specialized agents a question can be routed to."""
    MULTIMEDIA = "multimedia"            # audio/video/image tasks
    RESEARCH = "research"                # web-search / lookup tasks
    LOGIC_MATH = "logic_math"            # reasoning and calculation tasks
    FILE_PROCESSING = "file_processing"  # attached-file tasks
    CHESS = "chess"                      # chess-specific tasks
    GENERAL = "general"                  # fallback for everything else
class ModelConfig:
    """Configuration constants for AI models.

    A plain constant-holder class (like ``ToolConfig`` and ``UIConfig``):
    all attributes are shared class-level constants, readable from either
    the class or an instance.
    """

    # Model identifiers passed to the respective providers.
    GEMINI_MODEL: str = "gemini/gemini-2.0-flash"
    QWEN_MODEL: str = "Qwen/Qwen2.5-72B-Instruct"
    CLASSIFICATION_MODEL: str = "Qwen/Qwen2.5-7B-Instruct"

    # Kluster.ai models: short alias -> fully-qualified model id.
    # BUGFIX: the original assigned dataclasses.field(default_factory=...)
    # in a class that is NOT decorated with @dataclass, so KLUSTER_MODELS
    # was a dataclasses.Field object instead of a dict. A plain dict fixes
    # that and matches the sibling constant classes.
    KLUSTER_MODELS: Dict[str, str] = {
        "gemma3-27b": "openai/google/gemma-3-27b-it",
        "qwen3-235b": "openai/Qwen/Qwen3-235B-A22B-FP8",
        "qwen2.5-72b": "openai/Qwen/Qwen2.5-72B-Instruct",
        "llama3.1-405b": "openai/meta-llama/Meta-Llama-3.1-405B-Instruct"
    }

    # API endpoints
    KLUSTER_API_BASE: str = "https://api.kluster.ai/v1"

    # Model run parameters
    MAX_STEPS: int = 12         # max agent reasoning steps per run
    VERBOSITY_LEVEL: int = 2
    TEMPERATURE: float = 0.7    # sampling temperature
    MAX_TOKENS: int = 4000      # completion token budget

    # Retry settings for transient API failures
    MAX_RETRIES: int = 3
    BASE_DELAY: float = 2.0     # base backoff delay, seconds

    # Memory management toggles
    ENABLE_FRESH_AGENTS: bool = True
    ENABLE_TOKEN_MANAGEMENT: bool = True
class ToolConfig:
    """Configuration constants for agent tools (files, cache, search, video)."""

    # File processing limits
    MAX_FILE_SIZE: int = 100 * 1024 * 1024  # largest accepted file: 100 MB
    MAX_FRAMES: int = 10                    # max frames extracted per video
    MAX_PROCESSING_TIME: int = 1800         # processing cap, seconds (30 minutes)

    # Cache settings
    CACHE_TTL: int = 900                    # cache entry lifetime, seconds (15 minutes)
    ENABLE_CACHING: bool = True

    # Search settings
    MAX_SEARCH_RESULTS: int = 10            # results kept per search query
    SEARCH_TIMEOUT: int = 30                # per-search timeout, seconds

    # YouTube settings
    YOUTUBE_QUALITY: str = "medium"         # download quality preset
    MAX_VIDEO_DURATION: int = 3600          # longest accepted video, seconds (1 hour)
class UIConfig:
    """Configuration constants for user interfaces."""

    # Gradio server settings
    SERVER_NAME: str = "0.0.0.0"      # bind on all interfaces
    SERVER_PORT: int = 7860           # Gradio's conventional default port
    SHARE: bool = False               # no public Gradio share link by default

    # Interface limits
    MAX_QUESTION_LENGTH: int = 5000   # characters per question
    MAX_QUESTIONS_BATCH: int = 20     # questions per batch run
    DEMO_MODE: bool = False
class Config: | |
"""Centralized configuration management.""" | |
def __init__(self): | |
# Load environment variables | |
load_dotenv() | |
# Initialize configurations | |
self.model = ModelConfig() | |
self.tools = ToolConfig() | |
self.ui = UIConfig() | |
# API keys | |
self._api_keys = self._load_api_keys() | |
# Validation | |
self._validate_config() | |
def _load_api_keys(self) -> Dict[str, Optional[str]]: | |
"""Load API keys from environment.""" | |
return { | |
"gemini": os.getenv("GEMINI_API_KEY"), | |
"huggingface": os.getenv("HUGGINGFACE_TOKEN"), | |
"kluster": os.getenv("KLUSTER_API_KEY"), | |
"serpapi": os.getenv("SERPAPI_API_KEY") | |
} | |
def _validate_config(self) -> None: | |
"""Validate configuration and API keys.""" | |
if not any(self._api_keys.values()): | |
raise ValueError( | |
"At least one API key must be provided: " | |
"GEMINI_API_KEY, HUGGINGFACE_TOKEN, or KLUSTER_API_KEY" | |
) | |
def get_api_key(self, provider: str) -> Optional[str]: | |
"""Get API key for specific provider.""" | |
return self._api_keys.get(provider.lower()) | |
def has_api_key(self, provider: str) -> bool: | |
"""Check if API key exists for provider.""" | |
key = self.get_api_key(provider) | |
return key is not None and len(key.strip()) > 0 | |
def get_available_models(self) -> list[ModelType]: | |
"""Get list of available models based on API keys.""" | |
available = [] | |
if self.has_api_key("kluster"): | |
available.append(ModelType.KLUSTER) | |
if self.has_api_key("gemini"): | |
available.append(ModelType.GEMINI) | |
if self.has_api_key("huggingface"): | |
available.append(ModelType.QWEN) | |
return available | |
def get_fallback_chain(self) -> list[ModelType]: | |
"""Get model fallback chain based on availability.""" | |
available = self.get_available_models() | |
# Prefer Kluster -> Gemini -> Qwen | |
priority_order = [ModelType.KLUSTER, ModelType.GEMINI, ModelType.QWEN] | |
return [model for model in priority_order if model in available] | |
def debug_mode(self) -> bool: | |
"""Check if debug mode is enabled.""" | |
return os.getenv("DEBUG", "false").lower() == "true" | |
def log_level(self) -> str: | |
"""Get logging level.""" | |
return os.getenv("LOG_LEVEL", "INFO").upper() | |
# Module-level singleton: importing this module loads .env, reads API keys,
# and raises ValueError if no key is configured.
config = Config()