diff --git a/.claude/settings.local.json b/.claude/settings.local.json
index b700260c7823c6995f14571869ca4b2a4c183a1f..8f352f26cb4e4a557c3a491db2ac54a55d180c36 100644
--- a/.claude/settings.local.json
+++ b/.claude/settings.local.json
@@ -1,29 +1,11 @@
 {
   "permissions": {
     "allow": [
-      "Bash(mkdir:*)",
-      "Bash(python:*)",
-      "Bash(rg:*)",
       "WebFetch(domain:huggingface.co)",
-      "WebFetch(domain:huggingface.co)",
-      "WebFetch(domain:huggingface.co)",
-      "Bash(rm:*)",
       "Bash(ls:*)",
+      "Bash(tree:*)",
       "Bash(find:*)",
-      "Bash(npm create:*)",
-      "Bash(npx sv@latest create:*)",
-      "Bash(git pull:*)",
-      "Bash(git add:*)",
-      "Bash(git commit:*)",
-      "Bash(git push:*)",
-      "Bash(grep:*)",
-      "Bash(true)",
-      "Bash(awk:*)",
-      "Bash(git reset:*)",
-      "WebFetch(domain:github.com)",
-      "Bash(timeout:*)",
-      "Bash(git rm:*)",
-      "Bash(chmod:*)"
+      "Bash(mkdir:*)"
     ],
     "deny": []
   }
diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index a6344aac8c09253b3b630fb776ae94478aa0275b..0000000000000000000000000000000000000000
--- a/.gitattributes
+++ /dev/null
@@ -1,35 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
index 8c82f2ac149a563527c06ea7b64a900b84f6eace..a94d71c35829d4f38a095f2372aa5b342d795ced 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,9 +16,11 @@ parts/
 sdist/
 var/
 wheels/
+share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
+MANIFEST
 
 # Virtual Environment
 venv/
@@ -27,47 +29,86 @@ env/
 .venv
 
 # IDE
-.idea/
 .vscode/
+.idea/
 *.swp
 *.swo
 *~
 .DS_Store
 
-# Logs
-logs/
-*.log
-
-# Data directories
-data/cache/
-data/models/
-data/saves/*.db
-
-# Frontend
-frontend/node_modules/
-frontend/.svelte-kit/
-frontend/build/
-frontend/.env
-frontend/.env.*
-
-# Local settings
-.claude/settings.local.json
-
-# Temporary files
-*.tmp
-*.temp
+# Data and Cache
+# (no blanket data/ rule here: git cannot re-include the .gitkeep files kept
+# by the "Custom" section below when their parent directory is excluded)
+cache/
+*.cache
 .cache/
+tmp/
+temp/
 
 # Model files
 *.bin
 *.pth
 *.pt
-*.gguf
-*.safetensors
 *.onnx
+*.safetensors
+models_cache/
+
+# Generated files
+*.log
+*.png
+*.jpg
+*.jpeg
+*.gif
+*.mp3
+*.wav
+*.glb
+*.obj
+*.fbx
+*.dae
+
+# Secrets
+.env
+.env.local
+secrets.json
 
 # HuggingFace
 .huggingface/
 
+# Gradio
+flagged/
+gradio_cached_examples/
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+.tox/
+.nox/
+coverage.xml
+*.cover
+.hypothesis/
+
+# Documentation
+docs/_build/
+site/
+
 # OS files
-Thumbs.db
\ No newline at end of file
+Thumbs.db
+ehthumbs.db
+Desktop.ini
+.Spotlight-V100
+.Trashes
+
+# Backup files
+*.bak
+*.backup
+*~
+
+# Custom
+/data/users/*
+/data/monsters/*
+/data/models/*
+/data/cache/*
+!/data/users/.gitkeep
+!/data/monsters/.gitkeep
+!/data/models/.gitkeep
+!/data/cache/.gitkeep
\ No newline at end of file
diff --git a/CLAUDE.md b/CLAUDE.md
deleted file mode 100644
index af60250ad471d54bb5c42e4e9e0fd7026f9cf56e..0000000000000000000000000000000000000000
--- a/CLAUDE.md
+++ /dev/null
@@ -1,235 +0,0 @@
-# CLAUDE.md
-
-This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
-
-## Project Overview
-
-DigiPal is an advanced AI-powered virtual monster companion application built with Streamlit, featuring deep AI conversations using Qwen 2.5 models, Kyutai STT speech recognition, comprehensive monster care systems, sophisticated evolution mechanics, and cutting-edge 3D model generation via OmniGen2 → Hunyuan3D-2.1 → UniRig pipeline. This is a streamlined multi-component system designed for modern deployment with HuggingFace integration.
-
-## Architecture
-
-### Core Technologies
-- **Frontend**: Streamlit with modern cyberpunk UI design
-- **Backend**: FastAPI with WebSocket support for real-time updates
-- **AI Models**: Qwen 2.5-1.5B-Instruct for conversations, Kyutai STT-2.6b-en for speech
-- **3D Pipeline**: OmniGen2 → Hunyuan3D-2.1 → UniRig (text-to-image-to-3D-to-rigged)
-- **Framework**: Python 3.11+ with asyncio for concurrent operations
-- **Database**: SQLite for monster persistence with async operations
-- **Deployment**: Modern architecture with HuggingFace integration, Docker support
-
-### Component Structure
-```
-src/
-├── ai/                         # AI processing components
-│   ├── qwen_processor.py       # Qwen 2.5 conversation engine
-│   └── speech_engine.py        # Kyutai STT speech recognition
-├── core/                       # Core game logic
-│   ├── monster_engine.py       # Monster stats, evolution, persistence
-│   ├── monster_engine_dw1.py   # DW1-aligned monster mechanics (reference)
-│   └── evolution_system.py     # Evolution mechanics
-├── pipelines/                  # 3D generation pipelines
-│   └── opensource_3d_pipeline_v2.py  # Production 3D pipeline: OmniGen2→Hunyuan3D→UniRig
-├── ui/                         # User interface
-│   ├── streamlit_interface.py  # Modern Streamlit interface
-│   └── state_manager.py        # Browser state management
-├── deployment/                 # Deployment optimization
-│   └── zero_gpu_optimizer.py   # Zero GPU resource management
-└── utils/                      # Utilities
-    └── performance_tracker.py  # Performance monitoring
-```
-
-## Development Commands
-
-### Running the Application
-```bash
-# Run complete application (FastAPI + Streamlit)
-python run_digipal.py
-
-# Or run components separately:
-
-# Run FastAPI backend server
-python app.py
-
-# Run Streamlit frontend (in another terminal)
-streamlit run src/ui/streamlit_interface.py
-
-# Run with debug logging
-LOG_LEVEL=DEBUG python app.py
-
-# Run with specific configuration
-API_PORT=8081 python app.py
-
-# Run with MCP enabled
-MCP_ENDPOINT=https://your-mcp-server MCP_API_KEY=your-key python app.py
-```
-
-### Running the Svelte Frontend
-```bash
-# Navigate to frontend directory
-cd frontend
-
-# Install dependencies (first time only)
-npm install
-
-# Run development server
-npm run dev
-
-# Build for production
-npm run build
-
-# Preview production build
-npm run preview
-```
-
-### Docker Development
-```bash
-# Build Docker image
-docker build -t digipal .
-
-# Run Docker container
-docker run -p 7860:7860 digipal
-
-# Run with volume mounting for data persistence
-docker run -p 7860:7860 -v $(pwd)/data:/app/data digipal
-```
-
-### Development Tools
-```bash
-# Code formatting (requires black installation)
-black src/
-
-# Linting (requires ruff installation)  
-ruff src/
-
-# Testing (test suite not yet implemented)
-pytest
-```
-
-## Key Implementation Details
-
-### Monster System
-- **Six-dimensional care system**: health, happiness, hunger, energy, discipline, cleanliness
-- **Real-time stat degradation**: continues even when application is offline
-- **Evolution stages**: egg → baby → child → adult → champion → ultimate
-- **Complex evolution requirements**: age, level, care quality, training, battles, social interaction
-- **Personality types**: friendly, energetic, calm, curious, brave with stat modifiers
-- **DW1 alignment**: Optional mode following Digimon World 1 mechanics
-
-### AI Conversation System
-- **Qwen 2.5 integration** with quantization support (8-bit) for GPU efficiency
-- **Kyutai STT-2.6b-en** for high-quality speech-to-text conversion
-- **Context-aware conversations** with personality-based system prompts
-- **Mood-responsive dialogue** based on current monster stats
-- **Conversation history management** with automatic truncation
-- **Flash Attention 2** optimization when available
-
-### 3D Generation Pipeline
-- **OmniGen2**: Advanced text-to-image generation with multi-view consistency
-- **Hunyuan3D-2.1**: State-of-the-art image-to-3D conversion via official HuggingFace Space API
-- **UniRig**: Automatic 3D model rigging via HuggingFace integration
-- **Complete Pipeline**: text → multi-view images → 3D mesh → rigged model
-- **Fallback Systems**: Graceful degradation when APIs are unavailable
-- **Model caching**: Efficient reuse of generated 3D assets
-- **Async generation**: Non-blocking 3D model creation
-
-### State Management
-- **Async SQLite operations** for monster persistence
-- **Browser state management** for session continuity
-- **Time-based stat updates** calculated from last interaction
-- **Cross-session persistence** maintaining monster state between visits
-
-### Zero GPU Optimization
-- **ZeroGPU decorator usage** with proper function-level application
-- **Resource detection** and optimization for Spaces deployment
-- **Memory management** with CUDA memory tracking
-- **Model quantization** for efficient GPU usage
-- **GPU wrapper functions** for AI model initialization and inference
-
-## Database Schema
-
-Monsters are stored in SQLite with JSON serialization:
-```sql
-CREATE TABLE monsters (
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    name TEXT NOT NULL UNIQUE,
-    data TEXT NOT NULL,  -- JSON serialized Monster object
-    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-)
-```
-
-## Environment Variables
-
-### Core Configuration
-- `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR)
-- `SERVER_NAME`: Server hostname (default: 0.0.0.0)
-- `SERVER_PORT`: Server port (default: 7860)
-- `SHARE`: Enable public sharing (true/false)
-- `DEBUG`: Enable debug mode (true/false)
-- `MAX_THREADS`: Maximum Gradio threads (default: 40)
-
-### Feature Toggles (V2)
-- `ENABLE_3D`: Enable 3D generation features (default: true)
-- `ENABLE_AI`: Enable AI conversation features (default: true)
-
-### GPU Configuration
-- `CUDA_VISIBLE_DEVICES`: GPU device selection
-- `TRANSFORMERS_CACHE`: Model cache directory
-- `HF_HOME`: HuggingFace cache directory
-
-### MCP Configuration
-- `MCP_ENDPOINT`: MCP service endpoint URL
-- `MCP_API_KEY`: MCP service authentication key
-
-## Hugging Face Space Configuration
-
-Configured as a Gradio Space with:
-- **SDK**: Gradio 5.34.2
-- **Hardware**: zero-gpu (ZeroGPU for efficient AI inference)
-- **Models**: Qwen/Qwen2.5-1.5B-Instruct, openai/whisper-base
-- **Storage**: medium (for model caching and monster data)
-- **Zero GPU**: @spaces.GPU decorators applied to AI-intensive functions
-
-## ZeroGPU Implementation
-
-The application uses proper ZeroGPU decorator patterns:
-
-```python
-# GPU wrapper functions for AI operations
-@spaces.GPU(duration=120)
-def gpu_generate_response(processor, prompt: str, generation_params: Dict[str, Any]) -> str:
-    # GPU-intensive AI inference
-
-@spaces.GPU(duration=60)  
-def gpu_model_initialization(model_class, model_name: str, **kwargs) -> Any:
-    # GPU-intensive model loading
-```
-
-Key ZeroGPU considerations:
-- Decorators applied at function level, not class level
-- Duration specified based on expected GPU usage time
-- Fallback to CPU operations when GPU not available
-- Wrapper functions handle Spaces environment detection
-
-## MCP (Model Context Protocol) Integration
-
-DigiPal supports MCP for flexible model deployment:
-- **ModelProvider enum**: Includes MCP alongside HUGGINGFACE, LOCAL, and SPACES
-- **MCPFluxWrapper**: Integrates Flux text-to-image models through MCP
-- **Configuration options**: MCP endpoint and API key support
-- **Server mode**: Gradio interface can run as an MCP server
-
-## Project Architecture
-
-### Backend
-- **Unified app.py**: FastAPI server on port 7861 with all features enabled
-- **Gradio Admin**: Running on port 7860 as fallback/admin interface
-- **WebSocket Support**: Real-time updates for stats and model changes
-
-### Frontend
-- **SvelteKit Application**: Located in `/frontend` directory
-- **Voice-First UI**: DigiVice-style interface with voice commands
-- **3D Rendering**: Using Threlte for monster visualization
-- **Cyberpunk-Retro Theme**: Custom styling with neon effects
-
-See [CLAUDE_SVELTE_FRONTEND_GUIDE.md](CLAUDE_SVELTE_FRONTEND_GUIDE.md) for detailed frontend documentation.
\ No newline at end of file
diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md
deleted file mode 100644
index bdf29c92daef89e60046845e83d747e41282a3a6..0000000000000000000000000000000000000000
--- a/DEPLOYMENT.md
+++ /dev/null
@@ -1,213 +0,0 @@
-# DigiPal Deployment Guide
-
-## Quick Start
-
-### Prerequisites
-- Python 3.11+
-- Node.js 18+ (for Svelte frontend, if using)
-- Git
-
-### Installation
-
-1. **Clone the repository:**
-```bash
-git clone <repository-url>
-cd digiPal
-```
-
-2. **Install Python dependencies:**
-```bash
-pip install -r requirements.txt
-```
-
-3. **Set up environment variables (optional):**
-```bash
-export HF_TOKEN="your_huggingface_token"  # For private models/spaces
-export MCP_ENDPOINT="your_mcp_endpoint"   # For MCP integration
-export MCP_API_KEY="your_mcp_key"
-```
-
-### Running DigiPal
-
-#### Option 1: Complete Application (Recommended)
-```bash
-python run_digipal.py
-```
-This starts both the FastAPI backend and Streamlit frontend.
-
-**Access:**
-- **Streamlit UI**: http://localhost:8501
-- **API Backend**: http://localhost:7861
-
-#### Option 2: Manual Startup
-Terminal 1 (Backend):
-```bash
-python app.py
-```
-
-Terminal 2 (Frontend):
-```bash
-streamlit run src/ui/streamlit_interface.py
-```
-
-#### Option 3: Svelte Frontend (Advanced)
-```bash
-# Terminal 1: Start backend
-python app.py
-
-# Terminal 2: Start Svelte frontend
-cd frontend
-npm install
-npm run dev
-```
-
-## Architecture Overview
-
-### Technology Stack
-- **Frontend**: Streamlit (modern cyberpunk UI)
-- **Backend**: FastAPI with WebSocket support
-- **AI Models**: 
-  - Qwen 2.5-1.5B-Instruct (conversations)
-  - Kyutai STT-2.6b-en (speech recognition)
-- **3D Pipeline**: OmniGen2 → Hunyuan3D-2.1 → UniRig
-- **Database**: SQLite with async operations
-
-### API Endpoints
-
-**Monster Management:**
-- `GET /api/monsters` - List all monsters
-- `POST /api/monsters` - Create new monster
-- `GET /api/monsters/{id}` - Get monster details
-- `POST /api/monsters/{id}/action` - Perform care action
-- `POST /api/monsters/{id}/talk` - Send message to monster
-- `POST /api/monsters/{id}/generate-3d` - Generate 3D model
-
-**WebSocket:**
-- `WS /api/monsters/{id}/ws` - Real-time updates
-
-## Configuration
-
-### Environment Variables
-
-| Variable | Description | Default |
-|----------|-------------|---------|
-| `LOG_LEVEL` | Logging level | `INFO` |
-| `API_PORT` | FastAPI backend port | `7861` |
-| `HF_TOKEN` | HuggingFace API token | None |
-| `MCP_ENDPOINT` | MCP service endpoint | None |
-| `MCP_API_KEY` | MCP API key | None |
-
-### Hardware Requirements
-
-**Minimum:**
-- 8GB RAM
-- 4GB free disk space
-- Internet connection (for HuggingFace APIs)
-
-**Recommended:**
-- 16GB RAM
-- NVIDIA GPU with 8GB+ VRAM
-- SSD storage
-- High-speed internet
-
-## 3D Generation Pipeline
-
-The application uses a modern 3D generation pipeline:
-
-1. **Text Input** → User describes their monster
-2. **OmniGen2** → Generates multi-view images
-3. **Hunyuan3D-2.1** → Converts images to 3D mesh
-4. **UniRig** → Automatically rigs the 3D model
-5. **Output** → Fully rigged 3D model ready for animation
-
-### API Integration
-- **OmniGen2**: Via transformers/diffusers pipeline
-- **Hunyuan3D-2.1**: Via official HuggingFace Space API
-- **UniRig**: Via HuggingFace model repository
-
-## Deployment Options
-
-### Local Development
-Use the quick start guide above.
-
-### Docker (Future)
-```bash
-docker build -t digipal .
-docker run -p 7861:7861 -p 8501:8501 digipal
-```
-
-### HuggingFace Spaces
-1. Fork/upload repository to HuggingFace Spaces
-2. Set Space type to "Streamlit"
-3. Configure secrets for HF_TOKEN if needed
-4. Space will auto-deploy
-
-## Troubleshooting
-
-### Common Issues
-
-**Port Already in Use:**
-```bash
-# Change ports
-API_PORT=8081 python app.py
-streamlit run src/ui/streamlit_interface.py --server.port 8502
-```
-
-**Missing Dependencies:**
-```bash
-pip install -r requirements.txt --upgrade
-```
-
-**3D Generation Fails:**
-- Check internet connection
-- Verify HF_TOKEN if using private models
-- Pipeline includes fallback mechanisms
-
-**Streamlit Not Starting:**
-```bash
-pip install streamlit --upgrade
-streamlit --version
-```
-
-### Performance Optimization
-
-**For GPU Systems:**
-- Ensure CUDA is properly installed
-- Models will automatically use GPU when available
-
-**For CPU-Only Systems:**
-- Increase timeout values for 3D generation
-- Consider using smaller model variants
-
-## Monitoring
-
-### Logs
-- Application logs: `logs/digipal.log`
-- Streamlit logs: Console output
-- FastAPI logs: Console output with timestamps
-
-### Health Check
-```bash
-curl http://localhost:7861/health
-```
-
-## Support
-
-For issues and questions:
-1. Check this deployment guide
-2. Review `CLAUDE.md` for development details
-3. Check console logs for error messages
-
-## New Tech Stack Summary
-
-**Replaced:**
-- Gradio → Streamlit (modern UI)
-- Faster Whisper → Kyutai STT-2.6b-en (better accuracy)
-- Complex 3D pipeline → Streamlined OmniGen2→Hunyuan3D→UniRig
-
-**Benefits:**
-- Modern, responsive UI with cyberpunk theme
-- Better speech recognition quality
-- State-of-the-art 3D generation pipeline
-- Simplified deployment and maintenance
-- Better separation of frontend/backend
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 53c80ba83966e61776ec67a78b88e3367eae0388..0000000000000000000000000000000000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-FROM python:3.11-slim
-
-# Set environment variables
-ENV PYTHONUNBUFFERED=1
-ENV PYTHONDONTWRITEBYTECODE=1
-ENV TRANSFORMERS_CACHE=/app/data/cache
-ENV HF_HOME=/app/data/cache
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y \
-    git \
-    ffmpeg \
-    libsndfile1 \
-    curl \
-    && rm -rf /var/lib/apt/lists/*
-
-# Set working directory
-WORKDIR /app
-
-# Copy requirements and install Python dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy application code
-COPY . .
-
-# Create necessary directories
-RUN mkdir -p data/saves data/models data/cache logs config
-
-# Expose ports for FastAPI backend and Streamlit frontend
-EXPOSE 7861 8501
-
-# Health check - check API server on port 7861
-HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
-    CMD curl -f http://localhost:7861/health || exit 1
-
-# Run the complete application (FastAPI + Streamlit)
-CMD ["python", "run_digipal.py"]
\ No newline at end of file
diff --git a/PROJECT_ARCHITECTURE.md b/PROJECT_ARCHITECTURE.md
deleted file mode 100644
index 1098b5c024a8e857cb687445f3f318a86f6b61a0..0000000000000000000000000000000000000000
--- a/PROJECT_ARCHITECTURE.md
+++ /dev/null
@@ -1,367 +0,0 @@
-# DigiPal Project Architecture & Documentation
-
-## 🏗️ System Overview
-
-DigiPal is a unified AI monster companion application that combines multiple technologies into a single, cohesive system. The architecture is designed to be modular, scalable, and deployable on Hugging Face Spaces with Zero GPU support.
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│                        DIGIPAL ECOSYSTEM                        │
-├─────────────────────────────────────────────────────────────────┤
-│                                                                 │
-│  ┌─────────────────┐    ┌─────────────────┐    ┌──────────────┐ │
-│  │   Svelte UI     │    │   FastAPI       │    │   Gradio     │ │
-│  │   (Frontend)    │◄──►│   (Backend)     │◄──►│   (Admin)    │ │
-│  │   Port: 5173    │    │   Port: 7861    │    │   Port: 7860  │ │
-│  └─────────────────┘    └─────────────────┘    └──────────────┘ │
-│           │                       │                       │     │
-│           └───────────────────────┼───────────────────────┘     │
-│                                   │                             │
-│  ┌─────────────────────────────────┼─────────────────────────────┐ │
-│  │                    CORE SYSTEMS                              │ │
-│  │  ┌─────────────┐ ┌─────────────┐ ┌─────────────┐            │ │
-│  │  │   Monster   │ │ Evolution   │ │   State     │            │ │
-│  │  │   Engine    │ │   System    │ │  Manager    │            │ │
-│  │  └─────────────┘ └─────────────┘ └─────────────┘            │ │
-│  └─────────────────────────────────────────────────────────────┘ │
-│                                   │                             │
-│  ┌─────────────────────────────────┼─────────────────────────────┐ │
-│  │                    AI SYSTEMS                                │ │
-│  │  ┌─────────────┐ ┌─────────────┐ ┌─────────────┐            │ │
-│  │  │   Qwen      │ │   Speech    │ │   3D        │            │ │
-│  │  │ Processor   │ │   Engine    │ │ Generation  │            │ │
-│  │  └─────────────┘ └─────────────┘ └─────────────┘            │ │
-│  └─────────────────────────────────────────────────────────────┘ │
-│                                   │                             │
-│  ┌─────────────────────────────────┼─────────────────────────────┐ │
-│  │                 DEPLOYMENT & OPTIMIZATION                    │ │
-│  │  ┌─────────────┐ ┌─────────────┐ ┌─────────────┐            │ │
-│  │  │   Zero GPU  │ │ Performance │ │   Spaces    │            │ │
-│  │  │  Optimizer  │ │  Tracker    │ │ Integration │            │ │
-│  │  └─────────────┘ └─────────────┘ └─────────────┘            │ │
-│  └─────────────────────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────────────────────┘
-```
-
-## 🚀 How It All Works Together
-
-### 1. **Unified Entry Point: `app.py`**
-The main application orchestrates everything:
-
-```python
-# app.py - The Master Controller
-├── FastAPI Server (Port 7861) - REST API & WebSocket
-├── Gradio Interface (Port 7860) - Admin Panel & Fallback UI
-└── Svelte Frontend (Port 5173) - Modern Web Interface
-```
-
-**Key Features:**
-- **Single Command**: `python app.py` starts everything
-- **Threading**: Gradio runs in separate thread, FastAPI in main thread
-- **Unified State**: All components share the same monster data
-- **Zero GPU Ready**: Automatically detects and optimizes for Hugging Face Spaces
-
-### 2. **Component Breakdown**
-
-#### **Frontend Layer**
-```
-Svelte UI (Port 5173)
-├── Modern, responsive web interface
-├── Real-time monster interactions
-├── 3D model viewer
-├── Voice chat interface
-└── Mini-games and training
-```
-
-#### **Backend Layer**
-```
-FastAPI (Port 7861)
-├── REST API endpoints
-├── WebSocket connections
-├── Monster management
-├── AI processing coordination
-└── 3D generation requests
-```
-
-#### **Admin Layer**
-```
-Gradio (Port 7860)
-├── Admin panel for debugging
-├── Fallback UI if Svelte fails
-├── System monitoring
-├── Direct monster creation
-└── Performance metrics
-```
-
-## 🔧 Core Systems Architecture
-
-### **Monster Engine** (`src/core/monster_engine.py`)
-```python
-class Monster:
-    ├── Stats (health, happiness, hunger, energy)
-    ├── Personality (traits, relationship level)
-    ├── Lifecycle (age, stage, evolution)
-    ├── Conversation history
-    └── 3D model data
-```
-
-### **Evolution System** (`src/core/evolution_system.py`)
-```python
-class EvolutionSystem:
-    ├── Evolution requirements
-    ├── Stage progression logic
-    ├── Special evolution conditions
-    └── Evolution history tracking
-```
-
-### **State Management** (`src/ui/state_manager.py`)
-```python
-class AdvancedStateManager:
-    ├── SQLite database persistence
-    ├── Monster save/load operations
-    ├── Conversation history storage
-    └── Performance metrics tracking
-```
-
-## 🤖 AI Systems Architecture
-
-### **Qwen Processor** (`src/ai/qwen_processor.py`)
-```python
-class QwenProcessor:
-    ├── Text generation for monster responses
-    ├── Personality-aware conversations
-    ├── Emotional impact calculation
-    └── Fallback responses when AI unavailable
-```
-
-### **Speech Engine** (`src/ai/speech_engine.py`)
-```python
-class AdvancedSpeechEngine:
-    ├── Voice-to-text processing
-    ├── Text-to-speech synthesis
-    ├── Real-time audio streaming
-    └── Multiple language support
-```
-
-### **3D Generation** (`src/pipelines/`)
-```python
-# Multiple 3D generation options:
-├── hunyuan3d_pipeline.py - Tencent's Hunyuan3D
-├── opensource_3d_pipeline_v2.py - Production pipeline
-└── Integration with Hugging Face Spaces
-```
-
-## 🚀 Deployment Architecture
-
-### **Local Development**
-```
-┌─────────────┐    ┌─────────────┐    ┌─────────────┐
-│   Svelte    │    │   FastAPI   │    │   Gradio    │
-│   (5173)    │◄──►│   (7861)    │◄──►│   (7860)    │
-└─────────────┘    └─────────────┘    └─────────────┘
-```
-
-### **Hugging Face Spaces Deployment**
-```
-┌─────────────────────────────────────────────────┐
-│              HUGGING FACE SPACES                │
-│  ┌─────────────┐    ┌─────────────┐            │
-│  │   Gradio    │    │   FastAPI   │            │
-│  │   (7860)    │◄──►│   (7861)    │            │
-│  └─────────────┘    └─────────────┘            │
-│           │                     │               │
-│           └─────────────────────┘               │
-│                                                 │
-│  ┌─────────────────────────────────────────────┐ │
-│  │           ZERO GPU OPTIMIZATION             │ │
-│  │  • Dynamic GPU allocation                   │ │
-│  │  • CPU fallback for AI models               │ │
-│  │  • Memory optimization                      │ │
-│  │  • Spaces.GPU decorators                    │ │
-│  └─────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────┘
-```
-
-## 🔄 Data Flow
-
-### **Monster Creation Flow**
-```
-1. User Input (Svelte/Gradio)
-   ↓
-2. FastAPI Endpoint (/api/monsters)
-   ↓
-3. Monster Engine (Create Monster)
-   ↓
-4. State Manager (Save to Database)
-   ↓
-5. Response (Monster Data + 3D Model)
-```
-
-### **Conversation Flow**
-```
-1. User Message (Text/Voice)
-   ↓
-2. Speech Engine (if voice)
-   ↓
-3. Qwen Processor (AI Response)
-   ↓
-4. Monster State Update
-   ↓
-5. State Manager (Save)
-   ↓
-6. WebSocket Update (Real-time)
-```
-
-### **3D Generation Flow**
-```
-1. Monster Creation/Evolution
-   ↓
-2. 3D Pipeline Selection
-   ↓
-3. Multi-view Image Generation
-   ↓
-4. 3D Model Creation
-   ↓
-5. Texture Generation
-   ↓
-6. Model Optimization
-   ↓
-7. Database Storage
-```
-
-## 🎯 Key Features by Component
-
-### **Svelte Frontend**
-- ✅ Modern, responsive UI
-- ✅ Real-time WebSocket updates
-- ✅ Voice chat interface
-- ✅ 3D model viewer
-- ✅ Mini-games
-- ✅ Mobile-friendly design
-
-### **FastAPI Backend**
-- ✅ RESTful API endpoints
-- ✅ WebSocket real-time updates
-- ✅ Monster CRUD operations
-- ✅ AI processing coordination
-- ✅ 3D generation requests
-- ✅ Performance monitoring
-
-### **Gradio Admin**
-- ✅ Admin panel interface
-- ✅ System monitoring
-- ✅ Direct monster management
-- ✅ Fallback UI
-- ✅ Debugging tools
-
-### **AI Systems**
-- ✅ Qwen 2.5 text generation
-- ✅ Speech-to-text processing
-- ✅ Text-to-speech synthesis
-- ✅ Multiple 3D generation pipelines
-- ✅ Fallback responses
-
-### **Core Systems**
-- ✅ Monster lifecycle management
-- ✅ Evolution system
-- ✅ Personality simulation
-- ✅ State persistence
-- ✅ Performance tracking
-
-## 🚀 Getting Started
-
-### **Local Development**
-```bash
-# 1. Install dependencies
-pip install -r requirements.txt
-
-# 2. Start the application
-python app.py
-
-# 3. Access interfaces:
-# - Svelte UI: http://localhost:5173
-# - API: http://localhost:7861
-# - Gradio Admin: http://localhost:7860
-```
-
-### **Hugging Face Spaces**
-```bash
-# 1. Push to repository
-git push origin main
-
-# 2. Spaces automatically deploys
-# 3. Access via Spaces URL
-```
-
-## 🔧 Configuration
-
-### **Environment Variables**
-```bash
-# Server Configuration
-SERVER_NAME=0.0.0.0
-SERVER_PORT=7860
-API_PORT=7861
-
-# AI Configuration
-MCP_ENDPOINT=your_mcp_endpoint
-MCP_API_KEY=your_api_key
-
-# Performance
-MAX_THREADS=40
-LOG_LEVEL=INFO
-```
-
-### **Zero GPU Optimization**
-```python
-# Automatic detection and optimization
-├── GPU available → Use CUDA
-├── CPU only → Optimize for CPU
-├── Memory constraints → Load smaller models
-└── Spaces environment → Apply @spaces.GPU decorators
-```
-
-## 📊 Performance Monitoring
-
-### **Metrics Tracked**
-- Total interactions
-- Average response time
-- User satisfaction
-- AI model performance
-- 3D generation success rate
-- Memory usage
-- GPU utilization
-
-### **Optimization Features**
-- Dynamic model loading
-- Memory-efficient processing
-- Caching strategies
-- Background updates
-- Graceful fallbacks
-
-## 🎯 Why This Architecture?
-
-### **Unified Experience**
-- Single entry point (`app.py`)
-- Shared state across all components
-- Consistent monster data
-- Real-time synchronization
-
-### **Scalability**
-- Modular component design
-- Independent scaling of services
-- Load balancing ready
-- Cloud deployment optimized
-
-### **Reliability**
-- Multiple UI options (Svelte + Gradio)
-- Fallback mechanisms
-- Error handling
-- Graceful degradation
-
-### **Developer Experience**
-- Clear separation of concerns
-- Well-documented APIs
-- Easy testing
-- Hot reloading support
-
-This architecture ensures that DigiPal is both powerful and maintainable, with all the advanced features you've requested while keeping the codebase organized and easy to understand. 
\ No newline at end of file
diff --git a/QUICK_UI_TEST.md b/QUICK_UI_TEST.md
deleted file mode 100644
index a52c2a92d357c396be2cf2bc53e156f22c2d14fa..0000000000000000000000000000000000000000
--- a/QUICK_UI_TEST.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# 🎨 Quick UI Test Guide
-
-## See the New DigiPal UI Now!
-
-### Option 1: UI Only Preview (Fastest)
-```bash
-python test_ui.py
-```
-This shows you the new cyberpunk Streamlit interface without needing the backend.
-
-### Option 2: Full Application
-```bash
-python run_digipal.py
-```
-This runs both backend and frontend for full functionality.
-
-## What You'll See
-
-### 🎨 **Modern Cyberpunk Theme:**
-- Dark gradient backgrounds with neon accents
-- Glowing cyan and magenta color scheme
-- Orbitron and Rajdhani fonts for sci-fi feel
-- Animated neon effects on titles and buttons
-
-### 🖥️ **Interface Features:**
-- **Welcome Screen**: Feature overview with holographic styling
-- **Sidebar**: Monster management with neon buttons
-- **Monster Stats**: Holographic containers with progress bars
-- **Chat Interface**: Cyberpunk-styled conversation area
-- **3D Generation**: Modern controls for model creation
-
-### 🚀 **Interactive Elements:**
-- Hover effects on buttons with glow animations
-- Gradient backgrounds that shift and pulse
-- Neon text effects with shadows
-- Holographic containers with backdrop blur
-
-## Access URLs
-
-After starting:
-- **Streamlit UI**: http://localhost:8501
-- **API Backend**: http://localhost:7861 (if running full app)
-
-## Notes
-
-- The UI test mode shows the interface but backend features won't work
-- Create a monster in the sidebar to see the full interface
-- All the cyberpunk styling and animations will be visible
-- The design is optimized for both desktop and tablet viewing
-
-## Troubleshooting
-
-**If Streamlit won't start:**
-```bash
-pip install streamlit --upgrade
-```
-
-**If you see port conflicts:**
-```bash
-STREAMLIT_PORT=8502 python test_ui.py
-```
-
-Enjoy the new futuristic DigiPal experience! 🐉✨
\ No newline at end of file
diff --git a/README.md b/README.md
index 028c82d613ebe53b390676b40e0afceac2a33506..380bd02ada86f8ca42107458a6d8b47f860aed68 100644
--- a/README.md
+++ b/README.md
@@ -1,127 +1,197 @@
 ---
-title: DigiPal Advanced Monster Companion
-emoji: 🐉
+title: DigiPal - AI Monster Companion
+emoji: 🤖
 colorFrom: purple
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: streamlit_app.py
+colorTo: green
+sdk: gradio
+sdk_version: 4.16.0
+app_file: app.py
 pinned: false
 license: mit
+hardware: zero-gpu
+hf_oauth: true
+hf_oauth_scopes:
+  - read-repos
+  - write-repos
+hf_oauth_expiration_minutes: 480
 models:
-  - Qwen/Qwen2.5-1.5B-Instruct
   - kyutai/stt-2.6b-en
-  - shitao/OmniGen-v1
+  - Qwen/Qwen2.5-0.5B-Instruct
+  - OmniGen2/OmniGen2
   - tencent/Hunyuan3D-2.1
-  - VAST-AI/UniRig
-datasets: []
 tags:
-  - gaming
+  - game
+  - 3d
+  - voice
+  - monster
   - ai-companion
-  - monster-raising
-  - conversation
-  - speech-recognition
-  - 3d-generation
-  - text-to-3d
-  - cyberpunk
-  - streamlit
-suggested_hardware: zero-a10g
-suggested_storage: medium
+  - digital-pet
 ---
 
-# 🐉 DigiPal - Advanced AI Monster Companion
+# 🤖 DigiPal: AI-Powered Digital Monster Companion
 
-**The most advanced AI-powered virtual monster companion with cutting-edge 3D generation!**
+<div align="center">
+  
+[![Hugging Face Spaces](https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/your-username/DigiPal)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
 
-## 🚀 Revolutionary Features
+[Demo](https://huggingface.co/spaces/your-username/DigiPal) | [Report Bug](https://github.com/your-username/DigiPal/issues) | [Request Feature](https://github.com/your-username/DigiPal/issues)
 
-- 🤖 **Advanced AI Conversations** with Qwen 2.5-1.5B-Instruct
-- 🎤 **High-Quality Speech Recognition** with Kyutai STT-2.6b-en  
-- 🎨 **State-of-the-Art 3D Generation** via OmniGen2 → Hunyuan3D-2.1 → UniRig
-- 📊 **Complex Care System** inspired by Digimon World mechanics
-- 🧬 **Dynamic Evolution** based on care quality and interaction
-- 💬 **Personality-Driven Responses** with emotional intelligence
-- 🎮 **Cyberpunk UI** with neon effects and holographic styling
+</div>
 
-## 🛠️ Technology Stack
+## 🌟 Overview
 
-### AI Models
-- **Conversations**: Qwen 2.5-1.5B-Instruct (quantized for efficiency)
-- **Speech-to-Text**: Kyutai STT-2.6b-en (latest multilingual model)
-- **Text-to-Image**: OmniGen2 (multi-view generation)
-- **Image-to-3D**: Hunyuan3D-2.1 (official Tencent model)
-- **3D Rigging**: UniRig (automatic model rigging)
+DigiPal brings the nostalgic charm of digital pet games into the AI era. Create, train, and evolve unique digital monsters using cutting-edge AI models. Inspired by classic games like Digimon World, DigiPal combines voice interaction, real-time 3D generation, and engaging gameplay mechanics.
 
-### Architecture
-- **Frontend**: Streamlit with cyberpunk theme
-- **Backend**: Integrated FastAPI services
-- **Database**: SQLite with async operations
-- **3D Pipeline**: Complete text → image → 3D → rigged workflow
+### ✨ Key Features
 
-## 🎯 3D Generation Pipeline
+- 🎙️ **Voice Interaction**: Create monsters by describing them with your voice
+- 🖼️ **AI-Powered Generation**: Unique monster designs generated in real-time
+- 🦾 **3D Models**: Automatic conversion from 2D to rigged 3D models
+- 🎮 **Classic Gameplay**: Training, evolution, and care mechanics
+- 💾 **Persistent Storage**: Your monsters are saved across sessions
+- 🤖 **Emoji Communication**: Monsters speak in emojis and numbers
+- 🌐 **Zero GPU Optimized**: Runs efficiently on HuggingFace Spaces
 
-The crown jewel of DigiPal is its revolutionary 3D generation system:
+## 🚀 Getting Started
 
-1. **Text Description** → User describes their monster
-2. **OmniGen2** → Generates consistent multi-view images  
-3. **Hunyuan3D-2.1** → Converts images to high-quality 3D mesh
-4. **UniRig** → Automatically rigs the model for animation
-5. **Result** → Fully rigged 3D model ready for games/animation
+### Online Demo
 
-## 🎮 How to Use
+Visit our [HuggingFace Space](https://huggingface.co/spaces/your-username/DigiPal) to try DigiPal instantly!
 
-1. **Create Your Monster**: Choose name and personality type
-2. **Care & Interact**: Feed, train, play, and talk with your companion
-3. **Watch Evolution**: Your monster grows based on care quality
-4. **Generate 3D Model**: Create a unique 3D representation
-5. **Download & Use**: Get your rigged model for other applications
+### Local Installation
 
-## 🎨 Monster Care System
+```bash
+# Clone the repository
+git clone https://github.com/your-username/DigiPal.git
+cd DigiPal
 
-- **Six Core Stats**: Health, Happiness, Hunger, Energy, Discipline, Cleanliness
-- **Real-Time Degradation**: Stats change even when you're away
-- **Evolution Stages**: Egg → Baby → Child → Adult → Champion → Ultimate
-- **Personality Types**: Friendly, Energetic, Calm, Curious, Brave
-- **Complex Requirements**: Age, level, care quality all matter
+# Install dependencies
+pip install -r requirements.txt
 
-## 💫 Technical Highlights
+# Run the application
+python app.py
+```
 
-- **Zero GPU Optimization**: Efficient model loading and inference
-- **Graceful Fallbacks**: Pipeline continues even if some APIs fail
-- **Real-Time Updates**: WebSocket integration for live stat changes
-- **Model Caching**: Intelligent reuse of generated assets
-- **Cross-Platform**: Works on desktop, tablet, and mobile
+## 🎮 How to Play
 
-## 🔧 Development
+### 1. Create Your Monster
 
-### Local Setup
-```bash
-git clone <repository>
-cd digiPal
-pip install -r requirements.txt
+**Voice Control:**
+- Click the microphone button
+- Describe your ideal monster
+- Example: "Create a fire-breathing dragon with blue scales"
 
-# Run complete application
-python run_digipal.py
+**Visual Control:**
+- Upload reference images
+- Draw your monster concept
+- Use the camera for real-world inspiration
 
-# Or run Streamlit only
-streamlit run streamlit_app.py
-```
+### 2. Train & Evolve
 
-### Environment Variables
-```bash
-HF_TOKEN=your_token          # For private models
-MCP_ENDPOINT=your_endpoint   # For MCP integration
-LOG_LEVEL=INFO              # Logging level
-```
+- Choose training types: Strength, Defense, Speed, Intelligence
+- Complete training sessions to improve stats
+- Meet evolution requirements to unlock new forms
+
+### 3. Care System
+
+- **Feed**: Keep hunger above 30%
+- **Play**: Maintain happiness above 40%
+- **Rest**: Manage fatigue levels
+- **Medicine**: Heal when health drops
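+
+A minimal sketch of how these thresholds could be checked (a hypothetical helper, assuming the monster state exposes `hunger`, `happiness`, `fatigue`, `hp`, and `max_hp` fields; the fatigue cutoff of 70 is illustrative):
+
+```python
+def needs_attention(monster: dict) -> list[str]:
+    """Return the care actions this monster currently needs."""
+    actions = []
+    if monster["hunger"] < 30:       # Feed: keep hunger above 30%
+        actions.append("feed")
+    if monster["happiness"] < 40:    # Play: keep happiness above 40%
+        actions.append("play")
+    if monster["fatigue"] > 70:      # Rest: manage fatigue (assumed cutoff)
+        actions.append("rest")
+    if monster["hp"] < monster["max_hp"]:  # Medicine: heal when health drops
+        actions.append("medicine")
+    return actions
+```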
+
+### 4. Monster Communication
+
+Your monster communicates using emojis and numbers:
+- 🤖💚 = Happy state
+- 🍖❓ = Hungry
+- 😴💤 = Tired
+- Numbers represent HP and happiness percentages
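+
+As a rough illustration (field names and thresholds are assumptions, not the actual game API), the status line can be built from keycap-digit emojis:
+
+```python
+def status_line(hp_pct: int, happiness_pct: int) -> str:
+    """Encode state as emoji plus keycap digits, e.g. 🤖💚1️⃣0️⃣0️⃣ at 100% HP."""
+    mood = "💚" if happiness_pct >= 40 else "💤"
+    digits = "".join(f"{d}\uFE0F\u20E3" for d in str(hp_pct))  # "1" -> 1️⃣
+    return f"🤖{mood}{digits}"
+
+print(status_line(100, 80))  # 🤖💚1️⃣0️⃣0️⃣
+```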
+
+## 🏗️ Architecture
+
+### AI Pipeline
+
+1. **Speech Recognition**: Kyutai STT for voice commands
+2. **Text Generation**: Qwen2.5 for monster traits
+3. **Image Generation**: OmniGen2 for visual creation
+4. **3D Conversion**: Hunyuan3D for model generation
+5. **Rigging**: Automatic skeleton and animation
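+
+The hand-off between these stages is strictly sequential; a sketch with placeholder stubs (the real interface in `core/ai_pipeline.py` differs) makes the flow concrete:
+
+```python
+# Placeholder stages so the flow runs end-to-end; swap in the real models.
+def transcribe(audio_path): return "a fire-breathing dragon with blue scales"
+def describe_monster(prompt): return {"name": "Pyro", "prompt": prompt}
+def render_image(traits): return f"{traits['name']}.png"
+def image_to_3d(image): return image.replace(".png", ".glb")
+def auto_rig(mesh): return mesh.replace(".glb", "_rigged.glb")
+
+def generate_from_voice(audio_path: str) -> dict:
+    prompt = transcribe(audio_path)       # 1. Kyutai STT
+    traits = describe_monster(prompt)     # 2. Qwen2.5 trait generation
+    image = render_image(traits)          # 3. OmniGen2 image generation
+    mesh = image_to_3d(image)             # 4. Hunyuan3D conversion
+    return {**traits, "model_3d": auto_rig(mesh)}  # 5. Rigging
+```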
+
+### Technology Stack
 
-## 📝 License
+- **Frontend**: Gradio with cyberpunk theming
+- **Backend**: Python with HuggingFace Spaces
+- **AI Models**: State-of-the-art transformers
+- **Storage**: Persistent HF Spaces `/data` volume
+- **Optimization**: Zero GPU with intelligent fallbacks
 
-MIT License - Feel free to use, modify, and distribute!
+## 📊 Game Mechanics
+
+### Stats System
+- **HP**: Health points (10-999)
+- **Attack**: Offensive power (5-500)
+- **Defense**: Defensive capability (5-500)
+- **Speed**: Movement and reaction (5-500)
+- **Special**: Magic/unique abilities (5-500)
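+
+These ranges can be enforced with a simple clamp on construction (a sketch; whether `core/game_mechanics.py` actually stores stats this way is an assumption):
+
+```python
+from dataclasses import dataclass
+
+def clamp(v: int, lo: int, hi: int) -> int:
+    return max(lo, min(hi, v))
+
+@dataclass
+class Stats:
+    hp: int = 10       # 10-999
+    attack: int = 5    # 5-500
+    defense: int = 5   # 5-500
+    speed: int = 5     # 5-500
+    special: int = 5   # 5-500
+
+    def __post_init__(self):
+        self.hp = clamp(self.hp, 10, 999)
+        for name in ("attack", "defense", "speed", "special"):
+            setattr(self, name, clamp(getattr(self, name), 5, 500))
+
+    @property
+    def total(self) -> int:
+        return self.attack + self.defense + self.speed + self.special
+```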
+
+### Evolution Stages
+1. **Rookie**: Starting form
+2. **Champion**: First evolution (150+ total stats)
+3. **Ultimate**: Advanced form (300+ total stats)
+4. **Mega**: Final evolution (500+ total stats)
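+
+Given a total-stats value (assumed here to exclude HP), the stage lookup reduces to a threshold table checked highest-first:
+
+```python
+STAGES = [(500, "Mega"), (300, "Ultimate"), (150, "Champion"), (0, "Rookie")]
+
+def evolution_stage(total_stats: int) -> str:
+    return next(stage for threshold, stage in STAGES if total_stats >= threshold)
+
+assert evolution_stage(120) == "Rookie"
+assert evolution_stage(320) == "Ultimate"
+```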
+
+### Personality Types
+- Brave, Timid, Aggressive, Gentle
+- Playful, Serious, Loyal, Independent
+- Each affects training preferences and dialogue
+
+## 🛠️ Advanced Features
+
+### Custom Modifications
+
+Modify `core/game_mechanics.py` to:
+- Add new evolution paths
+- Create custom training types
+- Implement new care mechanics
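+
+For example, a new evolution path might look like this (purely illustrative; the actual registration hook may differ):
+
+```python
+CUSTOM_EVOLUTIONS = {
+    "Aqua Rookie": {
+        "next_stage": "Tidal Champion",
+        "requirements": {"total_stats": 150, "training_focus": "speed"},
+    },
+}
+```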
+
+### Model Swapping
+
+Replace AI models in the `models/` directory:
+- Use different STT models
+- Try alternative image generators
+- Experiment with 3D converters
 
 ## 🤝 Contributing
 
-Contributions welcome! This project pushes the boundaries of AI companions and 3D generation.
+We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+
+1. Fork the repository
+2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
+3. Commit changes (`git commit -m 'Add AmazingFeature'`)
+4. Push to branch (`git push origin feature/AmazingFeature`)
+5. Open a Pull Request
+
+## 📄 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## 🙏 Acknowledgments
+
+- Inspired by Digimon World series
+- Built with HuggingFace ecosystem
+- Community feedback and contributions
+- Open-source AI model creators
+
+## 📞 Contact & Support
+
+- **Issues**: [GitHub Issues](https://github.com/your-username/DigiPal/issues)
+- **Discussions**: [HuggingFace Community](https://huggingface.co/spaces/your-username/DigiPal/discussions)
+- **Email**: your-email@example.com
 
 ---
 
-*Experience the future of AI companions with DigiPal! 🐉✨*
\ No newline at end of file
+<div align="center">
+Made with ❤️ by the DigiPal Team
+</div>
\ No newline at end of file
diff --git a/app.py b/app.py
index 03057762eeb93bf6131faf5f827b8580d87d410a..c912272d7372034949932584fcf3d2a74034441e 100644
--- a/app.py
+++ b/app.py
@@ -1,468 +1,419 @@
-"""
-DigiPal - Advanced AI Monster Companion with 3D Generation
-Unified application with all features enabled by default
-"""
-
-import asyncio
-import json
-import logging
+import gradio as gr
+import spaces
 import os
-import sys
-from pathlib import Path
-from typing import Dict, Any, Optional, List
-from datetime import datetime
-import uvicorn
-from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from pydantic import BaseModel
+import json
 import torch
-from contextlib import asynccontextmanager
-
-# Add src to path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# Environment configuration - All features enabled by default
-ENV_CONFIG = {
-    "LOG_LEVEL": os.getenv("LOG_LEVEL", "INFO"),
-    "SERVER_NAME": os.getenv("SERVER_NAME", "0.0.0.0"),
-    "STREAMLIT_PORT": int(os.getenv("STREAMLIT_PORT", "8501")),
-    "API_PORT": int(os.getenv("API_PORT", "7861")),
-    "SHARE": os.getenv("SHARE", "false").lower() == "true",
-    "DEBUG": os.getenv("DEBUG", "false").lower() == "true",
-    "MAX_THREADS": int(os.getenv("MAX_THREADS", "40")),
-    "MCP_ENDPOINT": os.getenv("MCP_ENDPOINT", ""),
-    "MCP_API_KEY": os.getenv("MCP_API_KEY", ""),
-    "HF_TOKEN": os.getenv("HF_TOKEN", "")
-}
-
-# HuggingFace Spaces detection
-IS_SPACES = os.getenv("SPACE_ID") is not None
-
-# API Models
-class CreateMonsterRequest(BaseModel):
-    name: str
-    personality: str
+import gc
+from datetime import datetime
+from pathlib import Path
 
-class MonsterActionRequest(BaseModel):
-    action: str
-    params: Dict[str, Any] = {}
+# Initialize directories
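+# (/data is the persistent-storage mount on HF Spaces; fall back to ./data locally)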
+DATA_DIR = Path("/data") if os.path.exists("/data") else Path("./data")
+DATA_DIR.mkdir(exist_ok=True)
+(DATA_DIR / "users").mkdir(exist_ok=True)
+(DATA_DIR / "monsters").mkdir(exist_ok=True)
+(DATA_DIR / "models").mkdir(exist_ok=True)
+(DATA_DIR / "cache").mkdir(exist_ok=True)
 
-class MonsterTalkRequest(BaseModel):
-    message: str
+# Import modules (to be created)
+from core.ai_pipeline import MonsterGenerationPipeline
+from core.game_mechanics import GameMechanics
+from core.state_manager import StateManager
+from core.auth_manager import AuthManager
+from ui.themes import get_cyberpunk_theme, CYBERPUNK_CSS
+from ui.interfaces import create_voice_interface, create_visual_interface
 
-class Generate3DRequest(BaseModel):
-    description: Optional[str] = None
+# Initialize with GPU optimization
+@spaces.GPU(duration=300)
+def initialize_systems():
+    """Initialize all core systems with GPU"""
+    pipeline = MonsterGenerationPipeline()
+    return pipeline
 
-# Import core modules after environment setup
+# Initialize core systems
 try:
-    from src.core.monster_engine import Monster, MonsterPersonalityType as PersonalityType
-    from src.core.evolution_system import EvolutionSystem
-    from src.ai.qwen_processor import QwenProcessor, ModelConfig
-    from src.ai.speech_engine import AdvancedSpeechEngine as SpeechEngine, SpeechConfig
-    from src.ui.state_manager import AdvancedStateManager as StateManager
-    from src.deployment.zero_gpu_optimizer import get_optimal_device
-    from src.pipelines.opensource_3d_pipeline_v2 import (
-        ProductionPipeline, 
-        ProductionConfig
-    )
-    
-    # UI imports - now using Streamlit (separate process)
-    # from src.ui.streamlit_interface import main as streamlit_main
-except ImportError as e:
-    logger.error(f"Failed to import required modules: {e}")
-    sys.exit(1)
-
-# Initialize FastAPI app
-app = FastAPI(title="DigiPal API", version="1.0.0")
-
-# Add CORS middleware for frontend communication
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],  # In production, replace with specific origins
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-# Global state management
-class AppState:
-    def __init__(self):
-        self.monsters: Dict[str, Monster] = {}
-        self.state_manager = StateManager()
-        self.qwen_processor = None
-        self.speech_engine = None
-        self.evolution_system = EvolutionSystem()
-        self.pipeline_3d = None
-        self.active_connections: Dict[str, WebSocket] = {}
-        self.initialized = False
-        
-    async def initialize(self):
-        """Initialize AI components and pipelines"""
-        if self.initialized:
-            return
-            
-        logger.info("Initializing AI components...")
-        
-        # Initialize AI processors
-        try:
-            # Create Qwen processor config based on available resources
-            qwen_config = ModelConfig(
-                model_name="Qwen/Qwen2.5-1.5B-Instruct",  # Smaller model for Spaces
-                max_memory_gb=4.0,  # Conservative memory usage
-                inference_speed="fast",  # Fast inference for Spaces
-                use_quantization=True,
-                use_flash_attention=True
-            )
-            
-            self.qwen_processor = QwenProcessor(qwen_config)
-            
-            # Create speech engine config for Kyutai STT
-            speech_config = SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",  # Kyutai STT model
-                device="auto",  # Auto-detect device
-                torch_dtype="float32",  # Use float32 for better compatibility
-                use_vad=True,
-                vad_aggressiveness=2,
-                use_pipeline=True  # Use pipeline for easier integration
-            )
-            
-            self.speech_engine = SpeechEngine(speech_config)
-            
-            # Initialize 3D pipeline
-            logger.info("Using production pipeline for 3D generation")
-            pipeline_config = ProductionConfig(
-                hf_token=ENV_CONFIG.get("HF_TOKEN"),  # Use proper HF_TOKEN
-                device="cuda" if torch.cuda.is_available() else "cpu"
-            )
-            
-            self.pipeline_3d = ProductionPipeline(pipeline_config)
-            
-            self.initialized = True
-            logger.info("All components initialized successfully")
-            
-        except Exception as e:
-            logger.error(f"Failed to initialize components: {e}")
-            raise
-
-# Create global app state
-app_state = AppState()
-
-# Lifespan event handler
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    """Lifespan event handler for FastAPI"""
-    # Startup
-    await app_state.initialize()
-    yield
-    # Shutdown
-    pass
-
-# Update app with lifespan
-app.router.lifespan_context = lifespan
-
-# WebSocket connection manager
-class ConnectionManager:
-    def __init__(self):
-        self.active_connections: Dict[str, WebSocket] = {}
-        
-    async def connect(self, websocket: WebSocket, monster_id: str):
-        await websocket.accept()
-        self.active_connections[monster_id] = websocket
-        
-    def disconnect(self, monster_id: str):
-        if monster_id in self.active_connections:
-            del self.active_connections[monster_id]
-            
-    async def send_update(self, monster_id: str, data: dict):
-        if monster_id in self.active_connections:
-            try:
-                await self.active_connections[monster_id].send_json(data)
-            except:
-                self.disconnect(monster_id)
+    pipeline = initialize_systems()
+except Exception as e:
+    print(f"GPU initialization failed, falling back to CPU: {e}")
+    pipeline = MonsterGenerationPipeline(device="cpu")
 
-manager = ConnectionManager()
+game_mechanics = GameMechanics()
+state_manager = StateManager(DATA_DIR)
+auth_manager = AuthManager()
 
-# API Endpoints
-@app.get("/health")
-async def health_check():
-    """Health check endpoint"""
-    return {"status": "healthy", "initialized": app_state.initialized}
-
-@app.get("/api/monsters")
-async def list_monsters():
-    """List all available saved monsters"""
-    try:
-        saved_monsters = await app_state.state_manager.list_saved_monsters()
-        return {"monsters": saved_monsters}
-    except Exception as e:
-        logger.error(f"Error listing monsters: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/api/monsters")
-async def create_monster(request: CreateMonsterRequest):
-    """Create a new monster"""
-    try:
-        # Create new monster
-        personality = PersonalityType[request.personality.upper()]
-        monster = Monster(name=request.name, personality=personality)
-        
-        # Save to state
-        app_state.monsters[monster.id] = monster
-        
-        # Save to database
-        await app_state.state_manager.save_monster(monster)
-        
-        return {
-            "id": monster.id,
-            "name": monster.name,
-            "personality": monster.personality.value,
-            "stage": monster.stage.value,
-            "stats": monster.get_stats()
-        }
-    except Exception as e:
-        logger.error(f"Error creating monster: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.get("/api/monsters/{monster_id}")
-async def get_monster(monster_id: str):
-    """Load a specific monster's full state"""
-    try:
-        # Check if already loaded
-        if monster_id in app_state.monsters:
-            monster = app_state.monsters[monster_id]
-        else:
-            # Load from database
-            monster = await app_state.state_manager.load_monster_by_id(monster_id)
-            if not monster:
-                raise HTTPException(status_code=404, detail="Monster not found")
-            app_state.monsters[monster_id] = monster
-        
+# Main generation function
+@spaces.GPU(duration=180)
+def generate_monster(oauth_profile, audio_input=None, text_input=None, reference_images=None, 
+                    training_focus="balanced", care_level="normal"):
+    """Generate a new monster with AI pipeline"""
+    
+    if oauth_profile is None:
         return {
-            "id": monster.id,
-            "name": monster.name,
-            "personality": monster.personality.value,
-            "stage": monster.stage.value,
-            "stats": monster.get_stats(),
-            "model_url": monster.model_url,
-            "conversation_history": monster.conversation_history[-10:]  # Last 10 messages
+            "message": "🔒 Please log in to create monsters!",
+            "image": None,
+            "model_3d": None,
+            "stats": None,
+            "dialogue": None
         }
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error loading monster: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/api/monsters/{monster_id}/action")
-async def perform_action(monster_id: str, request: MonsterActionRequest):
-    """Perform a care action on the monster"""
+    
+    user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+    
     try:
-        if monster_id not in app_state.monsters:
-            raise HTTPException(status_code=404, detail="Monster not found")
-        
-        monster = app_state.monsters[monster_id]
-        result = {}
-        
-        # Handle different actions
-        if request.action == "feed":
-            food_type = request.params.get("food_type", "balanced")
-            result = monster.feed(food_type)
-        elif request.action == "train":
-            training_type = request.params.get("training_type", "strength")
-            result = monster.train(training_type)
-        elif request.action == "play":
-            result = monster.play()
-        elif request.action == "clean":
-            result = monster.clean()
-        elif request.action == "heal":
-            result = monster.heal()
-        elif request.action == "discipline":
-            result = monster.discipline()
-        elif request.action == "rest":
-            result = monster.rest()
-        else:
-            raise HTTPException(status_code=400, detail=f"Unknown action: {request.action}")
-        
-        # Save state
-        await app_state.state_manager.save_monster(monster)
+        # Generate monster using AI pipeline
+        result = pipeline.generate_monster(
+            audio_input=audio_input,
+            text_input=text_input,
+            reference_images=reference_images,
+            user_id=user_id
+        )
         
-        # Send real-time update
-        await manager.send_update(monster_id, {
-            "type": "stats_update",
-            "stats": monster.get_stats(),
-            "stage": monster.stage.value
+        # Create game monster from AI result
+        monster = game_mechanics.create_monster(result, {
+            "training_focus": training_focus,
+            "care_level": care_level
         })
         
+        # Save to persistent storage
+        state_manager.save_monster(user_id, monster)
+        
+        # Prepare response
         return {
-            "success": True,
-            "result": result,
-            "stats": monster.get_stats()
+            "message": f"✨ {monster.name} has been created!",
+            "image": result.get('image'),
+            "model_3d": result.get('model_3d'),
+            "stats": monster.get_stats_display(),
+            "dialogue": result.get('dialogue', "🤖💚1️⃣0️⃣0️⃣")
         }
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error performing action: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/api/monsters/{monster_id}/talk")
-async def talk_to_monster(monster_id: str, request: MonsterTalkRequest):
-    """Send a text message to the monster"""
-    try:
-        if monster_id not in app_state.monsters:
-            raise HTTPException(status_code=404, detail="Monster not found")
-        
-        monster = app_state.monsters[monster_id]
-        
-        # Use MCP if available, otherwise use local processor
-        if ENV_CONFIG["MCP_ENDPOINT"] and hasattr(app_state.qwen_processor, 'use_mcp'):
-            response = await app_state.qwen_processor.generate_response_mcp(
-                monster, request.message
-            )
-        else:
-            response = app_state.qwen_processor.generate_response(
-                monster, request.message
-            )
-        
-        # Update conversation history
-        monster.conversation_history.append({
-            "role": "user",
-            "content": request.message,
-            "timestamp": datetime.now().isoformat()
-        })
-        monster.conversation_history.append({
-            "role": "assistant", 
-            "content": response,
-            "timestamp": datetime.now().isoformat()
-        })
-        
-        # Save state
-        await app_state.state_manager.save_monster(monster)
         
+    except Exception as e:
+        print(f"Error generating monster: {str(e)}")
+        # Use fallback generation
+        fallback_result = pipeline.fallback_generation(text_input or "friendly digital creature")
         return {
-            "response": response,
-            "stats": monster.get_stats()
+            "message": "⚡ Created using quick generation mode",
+            "image": fallback_result.get('image'),
+            "model_3d": None,
+            "stats": fallback_result.get('stats'),
+            "dialogue": "🤖❓9️⃣9️⃣"
         }
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error talking to monster: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
 
-@app.post("/api/monsters/{monster_id}/generate-3d")
-async def generate_3d_model(monster_id: str, request: Generate3DRequest):
-    """Trigger 3D model generation for the monster"""
-    try:
-        if monster_id not in app_state.monsters:
-            raise HTTPException(status_code=404, detail="Monster not found")
-        
-        monster = app_state.monsters[monster_id]
-        
-        # Generate description if not provided
-        if not request.description:
-            description = f"A {monster.personality.value} {monster.stage.value} digital monster"
-        else:
-            description = request.description
-        
-        # Generate 3D model
-        logger.info(f"Generating 3D model for {monster.name}: {description}")
-        model_path = await app_state.pipeline_3d.generate_3d_model(
-            prompt=description,
-            output_path=f"data/models/{monster_id}/model.glb"
+# Training function
+def train_monster(oauth_profile: gr.OAuthProfile | None, training_type, intensity):
+    """Train the active monster (oauth_profile is injected automatically by Gradio)"""
+    
+    if oauth_profile is None:
+        return "🔒 Please log in to train monsters!", None, None
+    
+    user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+    current_monster = state_manager.get_current_monster(user_id)
+    
+    if not current_monster:
+        return "❌ No active monster to train!", None, None
+    
+    # Apply training
+    result = game_mechanics.train_monster(current_monster, training_type, intensity)
+    
+    if result['success']:
+        state_manager.update_monster(user_id, current_monster)
+        return (
+            result['message'],
+            current_monster.get_stats_display(),
+            result.get('evolution_check')
         )
-        
-        # Update monster with model URL
-        monster.model_url = f"/models/{monster_id}/{Path(model_path).name}"
-        await app_state.state_manager.save_monster(monster)
-        
-        # Send update via WebSocket
-        await manager.send_update(monster_id, {
-            "type": "model_update",
-            "model_url": monster.model_url
-        })
-        
-        return {
-            "success": True,
-            "model_url": monster.model_url
-        }
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error generating 3D model: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
+    else:
+        return result['message'], None, None
 
-@app.websocket("/api/monsters/{monster_id}/ws")
-async def websocket_endpoint(websocket: WebSocket, monster_id: str):
-    """WebSocket endpoint for real-time updates"""
-    await manager.connect(websocket, monster_id)
+# Care functions
+def feed_monster(oauth_profile: gr.OAuthProfile | None, food_type):
+    """Feed the active monster (oauth_profile is injected automatically by Gradio)"""
     
-    try:
-        # Send initial stats
-        if monster_id in app_state.monsters:
-            monster = app_state.monsters[monster_id]
-            await websocket.send_json({
-                "type": "initial_state",
-                "stats": monster.get_stats(),
-                "stage": monster.stage.value,
-                "model_url": monster.model_url
-            })
+    if oauth_profile is None:
+        return "🔒 Please log in to care for monsters!"
+    
+    user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+    current_monster = state_manager.get_current_monster(user_id)
+    
+    if not current_monster:
+        return "❌ No active monster to feed!"
+    
+    result = game_mechanics.feed_monster(current_monster, food_type)
+    state_manager.update_monster(user_id, current_monster)
+    
+    return result['message']
+
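+# The play/praise/scold buttons defined below follow the same guard-then-update
+# pattern; a comment-only sketch (game_mechanics.play_monster is a hypothetical
+# helper, not defined in this diff):
+#
+#   def play_with_monster(oauth_profile: gr.OAuthProfile | None):
+#       if oauth_profile is None:
+#           return "🔒 Please log in to care for monsters!"
+#       user_id = oauth_profile.username
+#       current_monster = state_manager.get_current_monster(user_id)
+#       if not current_monster:
+#           return "❌ No active monster!"
+#       result = game_mechanics.play_monster(current_monster)  # hypothetical API
+#       state_manager.update_monster(user_id, current_monster)
+#       return result['message']
+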
+# Build the Gradio interface
+with gr.Blocks(
+    theme=get_cyberpunk_theme(),
+    css=CYBERPUNK_CSS,
+    title="DigiPal - Digital Monster Companion"
+) as demo:
+    
+    # Header with cyberpunk styling
+    gr.HTML("""
+        <div class="cyber-header">
+            <h1 class="glitch-text">🤖 DigiPal 🤖</h1>
+            <p class="cyber-subtitle">Your AI-Powered Digital Monster Companion</p>
+            <div class="pulse-line"></div>
+        </div>
+    """)
+    
+    # Authentication
+    with gr.Row():
+        login_btn = gr.LoginButton("🔐 Connect to Digital World", size="lg")
+        logout_btn = gr.LogoutButton("🔌 Disconnect", size="sm")
+        user_display = gr.Markdown("", elem_classes=["user-status"])
+    
+    # Main interface tabs
+    with gr.Tabs(elem_classes=["cyber-tabs"]):
+        
+        # Monster Creation Tab
+        with gr.TabItem("🧬 Create Monster", elem_classes=["cyber-tab-content"]):
+            with gr.Row():
+                # Input Column
+                with gr.Column(scale=1):
+                    gr.Markdown("### 🎙️ Voice Input")
+                    audio_input = gr.Audio(
+                        label="Describe your monster",
+                        sources=["microphone", "upload"],
+                        type="filepath",
+                        elem_classes=["cyber-input"]
+                    )
+                    
+                    gr.Markdown("### 💬 Text Input")
+                    text_input = gr.Textbox(
+                        label="Or type a description",
+                        placeholder="Describe your ideal digital monster...",
+                        lines=3,
+                        elem_classes=["cyber-input"]
+                    )
+                    
+                    gr.Markdown("### 🖼️ Reference Images")
+                    reference_images = gr.File(
+                        label="Upload reference images (optional)",
+                        file_count="multiple",
+                        file_types=["image"],
+                        elem_classes=["cyber-input"]
+                    )
+                    
+                    with gr.Row():
+                        training_focus = gr.Radio(
+                            choices=["balanced", "strength", "defense", "speed", "intelligence"],
+                            label="Training Focus",
+                            value="balanced",
+                            elem_classes=["cyber-radio"]
+                        )
+                    
+                    generate_btn = gr.Button(
+                        "⚡ Generate Monster",
+                        variant="primary",
+                        size="lg",
+                        elem_classes=["cyber-button", "generate-button"]
+                    )
+                
+                # Output Column
+                with gr.Column(scale=1):
+                    generation_message = gr.Markdown("", elem_classes=["cyber-message"])
+                    
+                    monster_image = gr.Image(
+                        label="Monster Appearance",
+                        type="pil",
+                        elem_classes=["monster-display"]
+                    )
+                    
+                    monster_model = gr.Model3D(
+                        label="3D Model",
+                        height=400,
+                        elem_classes=["monster-display"]
+                    )
+                    
+                    monster_dialogue = gr.Textbox(
+                        label="Monster Says",
+                        interactive=False,
+                        elem_classes=["cyber-dialogue"]
+                    )
+                    
+                    monster_stats = gr.JSON(
+                        label="Stats",
+                        elem_classes=["cyber-stats"]
+                    )
         
-        # Keep connection alive and handle stat degradation
-        while True:
-            await asyncio.sleep(30)  # Update every 30 seconds
-            
-            if monster_id in app_state.monsters:
-                monster = app_state.monsters[monster_id]
-                monster.update_time_based_stats()
+        # Monster Status Tab
+        with gr.TabItem("📊 Monster Status", elem_classes=["cyber-tab-content"]):
+            with gr.Row():
+                with gr.Column():
+                    current_monster_display = gr.Model3D(
+                        label="Your Digital Monster",
+                        height=400,
+                        elem_classes=["monster-display"]
+                    )
+                    
+                    monster_communication = gr.Textbox(
+                        label="Monster Communication",
+                        placeholder="Your monster speaks in emojis and numbers...",
+                        interactive=False,
+                        elem_classes=["cyber-dialogue"]
+                    )
                 
-                await websocket.send_json({
-                    "type": "stats_update",
-                    "stats": monster.get_stats(),
-                    "stage": monster.stage.value
-                })
+                with gr.Column():
+                    stats_display = gr.JSON(
+                        label="Current Stats",
+                        elem_classes=["cyber-stats"]
+                    )
+                    
+                    care_metrics = gr.JSON(
+                        label="Care Status",
+                        elem_classes=["cyber-stats"]
+                    )
+                    
+                    evolution_progress = gr.HTML(
+                        elem_classes=["evolution-display"]
+                    )
+                    
+                    refresh_btn = gr.Button(
+                        "🔄 Refresh Status",
+                        elem_classes=["cyber-button"]
+                    )
+        
+        # Training Tab
+        with gr.TabItem("💪 Training", elem_classes=["cyber-tab-content"]):
+            with gr.Row():
+                with gr.Column():
+                    training_type = gr.Radio(
+                        choices=["Strength", "Defense", "Speed", "Intelligence", "Special"],
+                        label="Training Type",
+                        value="Strength",
+                        elem_classes=["cyber-radio"]
+                    )
+                    
+                    training_intensity = gr.Slider(
+                        minimum=1,
+                        maximum=10,
+                        value=5,
+                        step=1,
+                        label="Training Intensity",
+                        elem_classes=["cyber-slider"]
+                    )
+                    
+                    train_btn = gr.Button(
+                        "🏋️ Start Training",
+                        variant="primary",
+                        elem_classes=["cyber-button"]
+                    )
                 
-    except WebSocketDisconnect:
-        manager.disconnect(monster_id)
-
-# Streamlit interface runs separately
-# Use: streamlit run src/ui/streamlit_interface.py
-
-# Main entry point
-if __name__ == "__main__":
-    # Create necessary directories
-    os.makedirs("data/saves", exist_ok=True)
-    os.makedirs("data/models", exist_ok=True)
-    os.makedirs("data/cache", exist_ok=True)
-    os.makedirs("logs", exist_ok=True)
-    
-    # Log startup info
-    logger.info("=" * 60)
-    logger.info("DigiPal - Advanced AI Monster Companion")
-    logger.info("=" * 60)
-    logger.info(f"Environment: {'HuggingFace Spaces' if IS_SPACES else 'Local'}")
-    logger.info(f"FastAPI Backend Port: {ENV_CONFIG['API_PORT']}")
-    logger.info(f"Streamlit UI: Run separately on port {ENV_CONFIG['STREAMLIT_PORT']}")
-    logger.info(f"MCP Enabled: {bool(ENV_CONFIG['MCP_ENDPOINT'])}")
-    logger.info("=" * 60)
+                with gr.Column():
+                    training_result = gr.Textbox(
+                        label="Training Result",
+                        interactive=False,
+                        elem_classes=["cyber-output"]
+                    )
+                    
+                    updated_stats = gr.JSON(
+                        label="Updated Stats",
+                        elem_classes=["cyber-stats"]
+                    )
+                    
+                    evolution_check = gr.HTML(
+                        elem_classes=["evolution-display"]
+                    )
+        
+        # Care Tab
+        with gr.TabItem("❤️ Care", elem_classes=["cyber-tab-content"]):
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("### 🍖 Feeding")
+                    food_type = gr.Radio(
+                        choices=["Meat", "Fish", "Vegetable", "Treat", "Medicine"],
+                        label="Select Food",
+                        value="Meat",
+                        elem_classes=["cyber-radio"]
+                    )
+                    
+                    feed_btn = gr.Button(
+                        "🍽️ Feed Monster",
+                        elem_classes=["cyber-button"]
+                    )
+                    
+                    feeding_result = gr.Textbox(
+                        label="Feeding Result",
+                        interactive=False,
+                        elem_classes=["cyber-output"]
+                    )
+                
+                with gr.Column():
+                    gr.Markdown("### 🎮 Interaction")
+                    
+                    play_btn = gr.Button(
+                        "🎾 Play",
+                        elem_classes=["cyber-button"]
+                    )
+                    
+                    praise_btn = gr.Button(
+                        "👏 Praise",
+                        elem_classes=["cyber-button"]
+                    )
+                    
+                    scold_btn = gr.Button(
+                        "👎 Scold",
+                        elem_classes=["cyber-button"]
+                    )
+                    
+                    interaction_result = gr.Textbox(
+                        label="Monster Response",
+                        interactive=False,
+                        elem_classes=["cyber-output"]
+                    )
     
-    # Start FastAPI server only
-    # Streamlit interface runs separately via: streamlit run src/ui/streamlit_interface.py
-    logger.info("Starting FastAPI backend server...")
-    logger.info(f"Streamlit UI: Run 'streamlit run src/ui/streamlit_interface.py' in another terminal")
+    # Event handlers.
+    # NOTE: gr.Request() cannot be instantiated to read the logged-in user; instead,
+    # Gradio injects gr.OAuthProfile into any handler whose oauth_profile parameter
+    # carries that type annotation, so the profile is not listed in `inputs` (this
+    # assumes generate_monster is annotated like train_monster and feed_monster).
+    generate_btn.click(
+        fn=generate_monster,
+        inputs=[
+            audio_input,
+            text_input,
+            reference_images,
+            training_focus,
+            gr.State("normal")  # care_level
+        ],
+        outputs=[
+            generation_message,
+            monster_image,
+            monster_model,
+            monster_stats,
+            monster_dialogue
+        ]
+    )
     
-    config = uvicorn.Config(
-        app,
-        host=ENV_CONFIG["SERVER_NAME"],
-        port=ENV_CONFIG["API_PORT"],
-        log_level=ENV_CONFIG["LOG_LEVEL"].lower()
+    train_btn.click(
+        fn=train_monster,
+        inputs=[
+            training_type,
+            training_intensity
+        ],
+        outputs=[
+            training_result,
+            updated_stats,
+            evolution_check
+        ]
     )
-    server = uvicorn.Server(config)
     
-    # Run FastAPI server
-    asyncio.run(server.serve())
\ No newline at end of file
+    feed_btn.click(
+        fn=feed_monster,
+        inputs=[food_type],
+        outputs=[feeding_result]
+    )
+
+# Launch the app
+if __name__ == "__main__":
+    demo.queue(
+        default_concurrency_limit=10,
+        max_size=100
+    ).launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        show_api=False,
+        show_error=True
+    )
\ No newline at end of file
diff --git a/src/core/__init__.py b/core/__init__.py
similarity index 100%
rename from src/core/__init__.py
rename to core/__init__.py
diff --git a/core/ai_pipeline.py b/core/ai_pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f343a8d4b7ea1263cc6c029d33c14cd5ee88a24
--- /dev/null
+++ b/core/ai_pipeline.py
@@ -0,0 +1,309 @@
+import spaces
+import torch
+import gc
+import os
+from typing import Optional, List, Dict, Any
+from datetime import datetime
+from pathlib import Path
+import numpy as np
+from PIL import Image
+import tempfile
+
+# Model imports (to be implemented)
+from models.stt_processor import KyutaiSTTProcessor
+from models.text_generator import QwenTextGenerator  
+from models.image_generator import OmniGenImageGenerator
+from models.model_3d_generator import Hunyuan3DGenerator
+from models.rigging_processor import UniRigProcessor
+from utils.fallbacks import FallbackManager
+from utils.caching import ModelCache
+
+class MonsterGenerationPipeline:
+    """Main AI pipeline for monster generation"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.cache = ModelCache()
+        self.fallback_manager = FallbackManager()
+        self.models = {}
+        self.model_loaded = {
+            'stt': False,
+            'text_gen': False,
+            'image_gen': False,
+            '3d_gen': False,
+            'rigging': False
+        }
+        
+        # Pipeline configuration
+        self.config = {
+            'max_retries': 3,
+            'timeout': 180,
+            'enable_caching': True,
+            'low_vram_mode': True
+        }
+    
+    def _cleanup_memory(self):
+        """Clear GPU memory"""
+        if self.device == "cuda":
+            torch.cuda.empty_cache()
+            torch.cuda.synchronize()
+        gc.collect()
+    
+    def _lazy_load_model(self, model_type: str):
+        """Lazy loading with memory optimization"""
+        if self.model_loaded[model_type]:
+            return self.models[model_type]
+        
+        # Clear memory before loading new model
+        self._cleanup_memory()
+        
+        try:
+            if model_type == 'stt':
+                self.models['stt'] = KyutaiSTTProcessor(device=self.device)
+            elif model_type == 'text_gen':
+                self.models['text_gen'] = QwenTextGenerator(device=self.device)
+            elif model_type == 'image_gen':
+                self.models['image_gen'] = OmniGenImageGenerator(device=self.device)
+            elif model_type == '3d_gen':
+                self.models['3d_gen'] = Hunyuan3DGenerator(device=self.device)
+            elif model_type == 'rigging':
+                self.models['rigging'] = UniRigProcessor(device=self.device)
+            
+            self.model_loaded[model_type] = True
+            return self.models[model_type]
+            
+        except Exception as e:
+            print(f"Failed to load {model_type}: {e}")
+            return None
+    
+    def _unload_model(self, model_type: str):
+        """Unload model to free memory"""
+        if model_type in self.models and self.model_loaded[model_type]:
+            if hasattr(self.models[model_type], 'to'):
+                self.models[model_type].to('cpu')
+            del self.models[model_type]
+            self.model_loaded[model_type] = False
+            self._cleanup_memory()
+    
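+    # Lifecycle sketch: each stage loads its model on demand and unloads it right
+    # after use, so peak VRAM stays near a single model's footprint:
+    #   model = self._lazy_load_model('image_gen')   # load (or return cached instance)
+    #   ...run inference...
+    #   self._unload_model('image_gen')              # move to CPU, drop ref, clear cache
+    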
+    @spaces.GPU(duration=300)
+    def generate_monster(self, 
+                        audio_input: Optional[str] = None,
+                        text_input: Optional[str] = None,
+                        reference_images: Optional[List] = None,
+                        user_id: Optional[str] = None) -> Dict[str, Any]:
+        """Main monster generation pipeline"""
+        
+        generation_log = {
+            'user_id': user_id,
+            'timestamp': datetime.now().isoformat(),
+            'stages_completed': [],
+            'fallbacks_used': [],
+            'success': False
+        }
+        
+        try:
+            # Stage 1: Speech to Text (if audio provided)
+            description = ""
+            if audio_input and os.path.exists(audio_input):
+                try:
+                    stt_model = self._lazy_load_model('stt')
+                    if stt_model:
+                        description = stt_model.transcribe(audio_input)
+                        generation_log['stages_completed'].append('stt')
+                    else:
+                        raise Exception("STT model failed to load")
+                except Exception as e:
+                    print(f"STT failed: {e}")
+                    description = text_input or "Create a friendly digital monster"
+                    generation_log['fallbacks_used'].append('stt')
+                finally:
+                    # Unload STT to free memory
+                    self._unload_model('stt')
+            else:
+                description = text_input or "Create a friendly digital monster"
+            
+            # Stage 2: Generate monster characteristics
+            monster_traits = {}
+            monster_dialogue = ""
+            try:
+                text_gen = self._lazy_load_model('text_gen')
+                if text_gen:
+                    monster_traits = text_gen.generate_traits(description)
+                    monster_dialogue = text_gen.generate_dialogue(monster_traits)
+                    generation_log['stages_completed'].append('text_gen')
+                else:
+                    raise Exception("Text generation model failed to load")
+            except Exception as e:
+                print(f"Text generation failed: {e}")
+                monster_traits, monster_dialogue = self.fallback_manager.handle_text_gen_failure(description)
+                generation_log['fallbacks_used'].append('text_gen')
+            finally:
+                self._unload_model('text_gen')
+            
+            # Stage 3: Generate monster image
+            monster_image = None
+            try:
+                image_gen = self._lazy_load_model('image_gen')
+                if image_gen:
+                    # Create enhanced prompt from traits
+                    image_prompt = self._create_image_prompt(description, monster_traits)
+                    monster_image = image_gen.generate(
+                        prompt=image_prompt,
+                        reference_images=reference_images,
+                        width=512,
+                        height=512
+                    )
+                    generation_log['stages_completed'].append('image_gen')
+                else:
+                    raise Exception("Image generation model failed to load")
+            except Exception as e:
+                print(f"Image generation failed: {e}")
+                monster_image = self.fallback_manager.handle_image_gen_failure(description)
+                generation_log['fallbacks_used'].append('image_gen')
+            finally:
+                self._unload_model('image_gen')
+            
+            # Stage 4: Convert to 3D model
+            model_3d = None
+            model_3d_path = None
+            try:
+                model_3d_gen = self._lazy_load_model('3d_gen')
+                if model_3d_gen and monster_image:
+                    model_3d = model_3d_gen.image_to_3d(monster_image)
+                    # Save 3D model
+                    model_3d_path = self._save_3d_model(model_3d, user_id)
+                    generation_log['stages_completed'].append('3d_gen')
+                else:
+                    raise Exception("3D generation failed")
+            except Exception as e:
+                print(f"3D generation failed: {e}")
+                model_3d = self.fallback_manager.handle_3d_gen_failure(monster_image)
+                generation_log['fallbacks_used'].append('3d_gen')
+            finally:
+                self._unload_model('3d_gen')
+            
+            # Stage 5: Add rigging (optional, can be skipped for performance)
+            rigged_model = model_3d
+            if model_3d and self.config.get('enable_rigging', False):
+                try:
+                    rigging_proc = self._lazy_load_model('rigging')
+                    if rigging_proc:
+                        rigged_model = rigging_proc.rig_mesh(model_3d)
+                        generation_log['stages_completed'].append('rigging')
+                except Exception as e:
+                    print(f"Rigging failed: {e}")
+                    generation_log['fallbacks_used'].append('rigging')
+                finally:
+                    self._unload_model('rigging')
+            
+            # Prepare download files
+            download_files = self._prepare_download_files(
+                rigged_model or model_3d, 
+                monster_image,
+                user_id
+            )
+            
+            generation_log['success'] = True
+            
+            return {
+                'description': description,
+                'traits': monster_traits,
+                'dialogue': monster_dialogue,
+                'image': monster_image,
+                'model_3d': model_3d_path,
+                'download_files': download_files,
+                'generation_log': generation_log,
+                'status': 'success'
+            }
+            
+        except Exception as e:
+            generation_log['error'] = str(e)
+            print(f"Pipeline error: {e}")
+            return self.fallback_generation(description or "digital monster", generation_log)
+    
+    def _create_image_prompt(self, base_description: str, traits: Dict) -> str:
+        """Create enhanced prompt for image generation"""
+        prompt_parts = [base_description]
+        
+        if traits:
+            if 'appearance' in traits:
+                prompt_parts.append(traits['appearance'])
+            if 'personality' in traits:
+                prompt_parts.append(f"with {traits['personality']} personality")
+            if 'color_scheme' in traits:
+                prompt_parts.append(f"featuring {traits['color_scheme']} colors")
+        
+        prompt_parts.extend([
+            "digital monster",
+            "creature design",
+            "game character",
+            "high quality",
+            "detailed"
+        ])
+        
+        return ", ".join(prompt_parts)
+    
+    def _save_3d_model(self, model_3d, user_id: str) -> Optional[str]:
+        """Save 3D model to persistent storage"""
+        if not model_3d:
+            return None
+            
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"monster_{user_id}_{timestamp}.glb"
+        
+        # Use HuggingFace Spaces persistent storage
+        if os.path.exists("/data"):
+            filepath = f"/data/models/{filename}"
+        else:
+            filepath = f"./data/models/{filename}"
+        
+        os.makedirs(os.path.dirname(filepath), exist_ok=True)
+        
+        # Save model; the export API depends on the mesh library in use. Most
+        # (e.g. trimesh) infer the format from the .glb extension when given a path.
+        if hasattr(model_3d, 'export'):
+            model_3d.export(filepath)
+        else:
+            # Placeholder fallback: persist a stub so callers still get a path
+            with open(filepath, 'wb') as f:
+                f.write(str(model_3d).encode())
+        
+        return filepath
+    
+    def _prepare_download_files(self, model_3d, image, user_id: str) -> List[str]:
+        """Prepare downloadable files for user"""
+        files = []
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # Save image
+        if image:
+            if isinstance(image, Image.Image):
+                image_path = f"/tmp/monster_{user_id}_{timestamp}.png"
+                image.save(image_path)
+                files.append(image_path)
+            elif isinstance(image, np.ndarray):
+                image_path = f"/tmp/monster_{user_id}_{timestamp}.png"
+                Image.fromarray(image).save(image_path)
+                files.append(image_path)
+        
+        # Export the 3D model in multiple formats; only files actually written
+        # are returned to the user
+        if model_3d and hasattr(model_3d, 'export'):
+            try:
+                # GLB format
+                glb_path = f"/tmp/monster_{user_id}_{timestamp}.glb"
+                model_3d.export(glb_path)
+                files.append(glb_path)
+                
+                # OBJ format (optional)
+                obj_path = f"/tmp/monster_{user_id}_{timestamp}.obj"
+                model_3d.export(obj_path)
+                files.append(obj_path)
+            except Exception as e:
+                print(f"Could not export 3D model: {e}")
+        
+        return files
+    
+    def fallback_generation(self, description: str, generation_log: Optional[Dict] = None) -> Dict[str, Any]:
+        """Complete fallback generation when the pipeline fails (also called
+        directly from the UI, which passes no log)"""
+        return self.fallback_manager.complete_fallback_generation(description, generation_log or {})
+    
+    def cleanup(self):
+        """Clean up all loaded models"""
+        for model_type in list(self.models.keys()):
+            self._unload_model(model_type)
+        self._cleanup_memory()
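+
+
+# Smoke-test sketch: assumes the model modules imported above are available; on
+# machines without CUDA the pipeline runs on CPU and leans on its fallbacks.
+if __name__ == "__main__":
+    pipeline = MonsterGenerationPipeline()
+    result = pipeline.generate_monster(text_input="a small fire dragon", user_id="demo")
+    print(result.get('status'), result.get('generation_log', {}).get('stages_completed'))
+    pipeline.cleanup()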
\ No newline at end of file
diff --git a/core/auth_manager.py b/core/auth_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..2953caac67d7639a43059a4ae7360ddc04478d82
--- /dev/null
+++ b/core/auth_manager.py
@@ -0,0 +1,101 @@
+from typing import Optional, Dict, Any
+from datetime import datetime, timedelta
+import secrets
+import json
+from pathlib import Path
+
+class AuthManager:
+    """Manages authentication for HuggingFace Spaces OAuth"""
+    
+    def __init__(self):
+        # OAuth scopes for HuggingFace Spaces
+        self.oauth_scopes = [
+            "read-repos",
+            "write-repos"
+        ]
+        
+        # Session management
+        self.sessions = {}
+        self.session_timeout = timedelta(hours=8)
+    
+    def get_oauth_config(self) -> Dict[str, Any]:
+        """Get OAuth configuration for HuggingFace Spaces"""
+        return {
+            "provider": "huggingface",
+            "scopes": self.oauth_scopes,
+            "expiration_minutes": 480,  # 8 hours
+            "allow_anonymous": False
+        }
+    
+    def validate_session(self, session_token: str) -> Optional[Dict[str, Any]]:
+        """Validate a session token"""
+        if session_token in self.sessions:
+            session = self.sessions[session_token]
+            if datetime.now() < session['expires']:
+                # Update last access
+                session['last_access'] = datetime.now()
+                return session['user_data']
+        return None
+    
+    def create_session(self, oauth_profile: Dict[str, Any]) -> str:
+        """Create a new session for authenticated user"""
+        session_token = secrets.token_urlsafe(32)
+        
+        self.sessions[session_token] = {
+            'user_data': {
+                'username': oauth_profile.get('preferred_username', oauth_profile.get('username')),
+                'name': oauth_profile.get('name', 'Anonymous'),
+                'avatar_url': oauth_profile.get('picture', oauth_profile.get('avatar_url')),
+                'auth_time': datetime.now().isoformat()
+            },
+            'created': datetime.now(),
+            'expires': datetime.now() + self.session_timeout,
+            'last_access': datetime.now()
+        }
+        
+        return session_token
+    
+    def cleanup_expired_sessions(self):
+        """Remove expired sessions"""
+        current_time = datetime.now()
+        expired_tokens = [
+            token for token, session in self.sessions.items()
+            if current_time > session['expires']
+        ]
+        
+        for token in expired_tokens:
+            del self.sessions[token]
+    
+    def get_user_permissions(self, username: str) -> Dict[str, Any]:
+        """Get user permissions"""
+        # In HuggingFace Spaces, all authenticated users have same permissions
+        return {
+            'can_create_monster': True,
+            'can_train': True,
+            'can_evolve': True,
+            'can_battle': True,
+            'can_export': True,
+            'max_monsters': 10,
+            'max_daily_generations': 50
+        }
+    
+    def log_user_action(self, username: str, action: str, details: Optional[Dict] = None):
+        """Log user actions for analytics"""
+        # This would typically write to a database or analytics service
+        # For HF Spaces, we'll just print for now
+        log_entry = {
+            'timestamp': datetime.now().isoformat(),
+            'username': username,
+            'action': action,
+            'details': details or {}
+        }
+        print(f"User Action: {json.dumps(log_entry)}")
+    
+    def format_oauth_button_config(self) -> Dict[str, Any]:
+        """Format configuration for Gradio LoginButton"""
+        return {
+            "value": "Connect to Digital World",
+            "size": "lg",
+            "icon": "🔐",
+            "variant": "primary"
+        }
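+
+
+# Usage sketch (profile dict is illustrative; real fields come from HF OAuth):
+if __name__ == "__main__":
+    auth = AuthManager()
+    token = auth.create_session({"preferred_username": "demo", "name": "Demo User"})
+    print(auth.validate_session(token))          # -> user_data dict while valid
+    auth.log_user_action("demo", "login", {"via": "example"})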
\ No newline at end of file
diff --git a/core/game_mechanics.py b/core/game_mechanics.py
new file mode 100644
index 0000000000000000000000000000000000000000..deee1fc31c8d29958b7c86acf8fa9c343082303b
--- /dev/null
+++ b/core/game_mechanics.py
@@ -0,0 +1,496 @@
+import json
+import random
+from datetime import datetime, timedelta
+from typing import Dict, List, Any, Optional
+from dataclasses import dataclass, asdict
+import numpy as np
+
+@dataclass
+class Monster:
+    """Monster data class"""
+    name: str
+    species: str
+    stage: str  # rookie, champion, ultimate, mega
+    stats: Dict[str, int]
+    care_state: Dict[str, float]
+    personality: Dict[str, Any]
+    birth_time: datetime
+    evolution_time: Optional[datetime] = None
+    training_count: int = 0
+    battle_count: int = 0
+    happiness_events: Optional[List[str]] = None
+    image_path: Optional[str] = None
+    model_3d_path: Optional[str] = None
+    
+    def __post_init__(self):
+        if self.happiness_events is None:
+            self.happiness_events = []
+    
+    def to_dict(self) -> Dict:
+        """Convert monster to dictionary for storage"""
+        data = asdict(self)
+        data['birth_time'] = self.birth_time.isoformat()
+        if self.evolution_time:
+            data['evolution_time'] = self.evolution_time.isoformat()
+        return data
+    
+    @classmethod
+    def from_dict(cls, data: Dict) -> 'Monster':
+        """Create monster from dictionary"""
+        data['birth_time'] = datetime.fromisoformat(data['birth_time'])
+        if data.get('evolution_time'):
+            data['evolution_time'] = datetime.fromisoformat(data['evolution_time'])
+        return cls(**data)
+    
+    def get_stats_display(self) -> Dict[str, Any]:
+        """Get formatted stats for display"""
+        return {
+            "name": self.name,
+            "species": self.species,
+            "stage": self.stage,
+            "level": self._calculate_level(),
+            "stats": {
+                "HP": f"{self.stats['hp']}/999",
+                "ATK": f"{self.stats['attack']}/500",
+                "DEF": f"{self.stats['defense']}/500",
+                "SPD": f"{self.stats['speed']}/500",
+                "SPC": f"{self.stats['special']}/500"
+            },
+            "care": {
+                "Hunger": f"{self.care_state['hunger']:.0f}%",
+                "Happiness": f"{self.care_state['happiness']:.0f}%",
+                "Fatigue": f"{self.care_state['fatigue']:.0f}%",
+                "Health": f"{self.care_state['health']:.0f}%"
+            },
+            "age": self._calculate_age()
+        }
+    
+    def _calculate_level(self) -> int:
+        """Calculate monster level based on stats and experience"""
+        total_stats = sum(self.stats.values())
+        base_level = total_stats // 50
+        exp_bonus = (self.training_count + self.battle_count) // 10
+        return min(99, base_level + exp_bonus + 1)
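+        # e.g. total stats 160 with 15 combined training/battle sessions:
+        #   160 // 50 + 15 // 10 + 1 = 3 + 1 + 1 = level 5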
+    
+    def _calculate_age(self) -> str:
+        """Calculate monster age"""
+        age = datetime.now() - self.birth_time
+        if age.days > 0:
+            return f"{age.days} days"
+        elif age.seconds > 3600:
+            return f"{age.seconds // 3600} hours"
+        else:
+            return f"{age.seconds // 60} minutes"
+
+
+class GameMechanics:
+    """Core game mechanics inspired by Digimon World 1"""
+    
+    def __init__(self):
+        # Stat ranges and limits
+        self.stat_limits = {
+            'hp': (10, 999),
+            'attack': (5, 500),
+            'defense': (5, 500),
+            'speed': (5, 500),
+            'special': (5, 500)
+        }
+        
+        # Care thresholds
+        self.care_thresholds = {
+            'hunger': {'critical': 20, 'low': 40, 'good': 70},
+            'happiness': {'critical': 20, 'low': 40, 'good': 70},
+            'fatigue': {'good': 30, 'tired': 60, 'exhausted': 80},
+            'health': {'critical': 30, 'low': 50, 'good': 80}
+        }
+        
+        # Training effectiveness modifiers
+        self.training_modifiers = {
+            'strength': {'attack': 1.5, 'defense': 0.8, 'speed': 0.7},
+            'defense': {'attack': 0.7, 'defense': 1.5, 'hp': 1.2},
+            'speed': {'speed': 1.5, 'attack': 0.9, 'special': 0.8},
+            'intelligence': {'special': 1.5, 'defense': 0.9, 'hp': 0.8},
+            'balanced': {'attack': 1.0, 'defense': 1.0, 'speed': 1.0, 'special': 1.0}
+        }
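+        # e.g. 'strength' training with base gain 10 yields roughly ATK +15, DEF +8,
+        # SPD +7 before the random ±20% jitter and any personality bonus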
+        
+        # Evolution requirements (simplified)
+        self.evolution_requirements = {
+            'champion': {
+                'min_stats': 150,  # Total stats
+                'min_care': 60,    # Average care percentage
+                'min_age': 1,      # Days
+                'training_count': 10
+            },
+            'ultimate': {
+                'min_stats': 300,
+                'min_care': 70,
+                'min_age': 3,
+                'training_count': 30
+            },
+            'mega': {
+                'min_stats': 500,
+                'min_care': 80,
+                'min_age': 7,
+                'training_count': 50
+            }
+        }
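+        # e.g. a rookie with 180 total stats, 75% average care, age 2 days and
+        # 12 training sessions clears every 'champion' threshold above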
+    
+    def create_monster(self, generation_result: Dict[str, Any], user_preferences: Dict = None) -> Monster:
+        """Create a new monster from AI generation results"""
+        
+        traits = generation_result.get('traits', {})
+        preferences = user_preferences or {}
+        
+        # Generate base stats based on traits and preferences
+        base_stats = self._generate_base_stats(traits, preferences.get('training_focus', 'balanced'))
+        
+        # Initialize care state
+        care_state = {
+            'hunger': 80.0,
+            'happiness': 90.0,
+            'fatigue': 10.0,
+            'health': 100.0
+        }
+        
+        # Determine personality from traits
+        personality = self._determine_personality(traits)
+        
+        # Create monster name
+        name = traits.get('name', self._generate_name(traits))
+        
+        # Create monster instance
+        monster = Monster(
+            name=name,
+            species=traits.get('species', 'DigiPal'),
+            stage='rookie',
+            stats=base_stats,
+            care_state=care_state,
+            personality=personality,
+            birth_time=datetime.now(),
+            image_path=generation_result.get('image'),
+            model_3d_path=generation_result.get('model_3d')
+        )
+        
+        return monster
+    
+    def _generate_base_stats(self, traits: Dict, focus: str) -> Dict[str, int]:
+        """Generate base stats based on traits and focus"""
+        # Base values
+        base = {
+            'hp': random.randint(50, 100),
+            'attack': random.randint(15, 35),
+            'defense': random.randint(15, 35),
+            'speed': random.randint(15, 35),
+            'special': random.randint(15, 35)
+        }
+        
+        # Apply focus modifiers
+        if focus in self.training_modifiers:
+            for stat, modifier in self.training_modifiers[focus].items():
+                if stat in base:
+                    base[stat] = int(base[stat] * modifier)
+        
+        # Apply trait-based modifiers
+        if traits.get('element') == 'fire':
+            base['attack'] += 10
+            base['special'] += 5
+        elif traits.get('element') == 'water':
+            base['defense'] += 10
+            base['hp'] += 20
+        elif traits.get('element') == 'earth':
+            base['defense'] += 15
+            base['hp'] += 10
+        elif traits.get('element') == 'wind':
+            base['speed'] += 15
+            base['special'] += 5
+        
+        # Ensure stats are within limits
+        for stat in base:
+            base[stat] = max(self.stat_limits[stat][0], 
+                           min(self.stat_limits[stat][1], base[stat]))
+        
+        return base
+    
+    def _determine_personality(self, traits: Dict) -> Dict[str, Any]:
+        """Determine monster personality from traits"""
+        personality_traits = [
+            'brave', 'timid', 'aggressive', 'gentle', 
+            'playful', 'serious', 'loyal', 'independent'
+        ]
+        
+        # Select primary trait
+        primary = traits.get('personality', random.choice(personality_traits))
+        
+        # Generate personality profile
+        return {
+            'primary': primary,
+            'likes': self._generate_likes(primary),
+            'dislikes': self._generate_dislikes(primary),
+            'training_preference': self._get_training_preference(primary),
+            'battle_style': self._get_battle_style(primary)
+        }
+    
+    def _generate_name(self, traits: Dict) -> str:
+        """Generate a name if not provided"""
+        prefixes = ['Digi', 'Cyber', 'Tech', 'Neo', 'Alpha', 'Beta']
+        suffixes = ['mon', 'pal', 'byte', 'bit', 'tron', 'x']
+        
+        prefix = random.choice(prefixes)
+        suffix = random.choice(suffixes)
+        
+        return f"{prefix}{suffix}"
+    
+    def _generate_likes(self, personality: str) -> List[str]:
+        """Generate things the monster likes based on personality"""
+        likes_map = {
+            'brave': ['battles', 'challenges', 'meat'],
+            'timid': ['quiet places', 'vegetables', 'praise'],
+            'aggressive': ['training', 'meat', 'battles'],
+            'gentle': ['praise', 'vegetables', 'playing'],
+            'playful': ['games', 'treats', 'attention'],
+            'serious': ['training', 'discipline', 'fish'],
+            'loyal': ['praise', 'companionship', 'meat'],
+            'independent': ['exploration', 'variety', 'fish']
+        }
+        return likes_map.get(personality, ['food', 'play', 'rest'])
+    
+    def _generate_dislikes(self, personality: str) -> List[str]:
+        """Generate things the monster dislikes based on personality"""
+        dislikes_map = {
+            'brave': ['running away', 'vegetables', 'rest'],
+            'timid': ['battles', 'loud noises', 'scolding'],
+            'aggressive': ['vegetables', 'rest', 'gentle training'],
+            'gentle': ['battles', 'scolding', 'meat'],
+            'playful': ['discipline', 'vegetables', 'being ignored'],
+            'serious': ['games', 'treats', 'slacking'],
+            'loyal': ['being alone', 'scolding', 'betrayal'],
+            'independent': ['clingy behavior', 'routine', 'vegetables']
+        }
+        return dislikes_map.get(personality, ['scolding', 'hunger', 'fatigue'])
+    
+    def _get_training_preference(self, personality: str) -> str:
+        """Get preferred training type based on personality"""
+        preferences = {
+            'brave': 'strength',
+            'timid': 'defense',
+            'aggressive': 'strength',
+            'gentle': 'intelligence',
+            'playful': 'speed',
+            'serious': 'balanced',
+            'loyal': 'defense',
+            'independent': 'speed'
+        }
+        return preferences.get(personality, 'balanced')
+    
+    def _get_battle_style(self, personality: str) -> str:
+        """Get battle style based on personality"""
+        styles = {
+            'brave': 'offensive',
+            'timid': 'defensive',
+            'aggressive': 'berserker',
+            'gentle': 'support',
+            'playful': 'trickster',
+            'serious': 'tactical',
+            'loyal': 'guardian',
+            'independent': 'adaptive'
+        }
+        return styles.get(personality, 'balanced')
+    
+    def train_monster(self, monster: Monster, training_type: str, intensity: int) -> Dict[str, Any]:
+        """Train the monster to improve stats"""
+        
+        # Check if monster can train
+        if monster.care_state['fatigue'] > self.care_thresholds['fatigue']['exhausted']:
+            return {
+                'success': False,
+                'message': f"{monster.name} is too tired to train! 😴💤",
+                'stat_changes': {}
+            }
+        
+        if monster.care_state['hunger'] < self.care_thresholds['hunger']['low']:
+            return {
+                'success': False,
+                'message': f"{monster.name} is too hungry to train! 🍖❓",
+                'stat_changes': {}
+            }
+        
+        # Calculate stat gains
+        base_gain = intensity * 2
+        stat_gains = {}
+        
+        # Apply training type modifiers
+        training_type = training_type.lower()
+        if training_type in self.training_modifiers:
+            for stat, modifier in self.training_modifiers[training_type].items():
+                if stat in monster.stats:
+                    gain = int(base_gain * modifier * random.uniform(0.8, 1.2))
+                    
+                    # Personality bonus
+                    if training_type == monster.personality['training_preference']:
+                        gain = int(gain * 1.2)
+                    
+                    # Apply gain with stat limits
+                    old_value = monster.stats[stat]
+                    new_value = min(self.stat_limits[stat][1], old_value + gain)
+                    actual_gain = new_value - old_value
+                    
+                    if actual_gain > 0:
+                        monster.stats[stat] = new_value
+                        stat_gains[stat] = actual_gain
+        
+        # Update care state
+        fatigue_gain = intensity * 5 + random.randint(0, 10)
+        happiness_gain = 5 if training_type == monster.personality['training_preference'] else 2
+        
+        monster.care_state['fatigue'] = min(100, monster.care_state['fatigue'] + fatigue_gain)
+        monster.care_state['happiness'] = min(100, monster.care_state['happiness'] + happiness_gain)
+        monster.care_state['hunger'] = max(0, monster.care_state['hunger'] - intensity * 2)
+        
+        # Update training count
+        monster.training_count += 1
+        
+        # Check for evolution
+        evolution_check = self.check_evolution(monster)
+        
+        # Generate response message
+        if stat_gains:
+            gains_text = ", ".join([f"{stat.upper()} +{gain}" for stat, gain in stat_gains.items()])
+            message = f"💪 Training complete! {gains_text}"
+        else:
+            message = f"📈 {monster.name} has reached stat limits in this area!"
+        
+        return {
+            'success': True,
+            'message': message,
+            'stat_changes': stat_gains,
+            'fatigue_gained': fatigue_gain,
+            'evolution_check': evolution_check
+        }
+    
+    def check_evolution(self, monster: Monster) -> Optional[Dict[str, Any]]:
+        """Check if monster meets evolution requirements"""
+        
+        current_stage = monster.stage
+        next_stage = None
+        
+        if current_stage == 'rookie':
+            next_stage = 'champion'
+        elif current_stage == 'champion':
+            next_stage = 'ultimate'
+        elif current_stage == 'ultimate':
+            next_stage = 'mega'
+        else:
+            return None
+        
+        requirements = self.evolution_requirements.get(next_stage)
+        if not requirements:
+            return None
+        
+        # Check requirements
+        total_stats = sum(monster.stats.values())
+        avg_care = sum(monster.care_state.values()) / len(monster.care_state)
+        age_days = (datetime.now() - monster.birth_time).days
+        
+        meets_requirements = (
+            total_stats >= requirements['min_stats'] and
+            avg_care >= requirements['min_care'] and
+            age_days >= requirements['min_age'] and
+            monster.training_count >= requirements['training_count']
+        )
+        
+        if meets_requirements:
+            return {
+                'can_evolve': True,
+                'next_stage': next_stage,
+                'message': f"✨ {monster.name} is ready to evolve to {next_stage}!"
+            }
+        else:
+            return {
+                'can_evolve': False,
+                'next_stage': next_stage,
+                'progress': {
+                    'stats': f"{total_stats}/{requirements['min_stats']}",
+                    'care': f"{avg_care:.0f}%/{requirements['min_care']}%",
+                    'age': f"{age_days}/{requirements['min_age']} days",
+                    'training': f"{monster.training_count}/{requirements['training_count']}"
+                }
+            }
+    
+    def feed_monster(self, monster: Monster, food_type: str) -> Dict[str, Any]:
+        """Feed the monster"""
+        
+        food_effects = {
+            'meat': {'hunger': 40, 'happiness': 10, 'health': 5},
+            'fish': {'hunger': 35, 'happiness': 15, 'health': 10},
+            'vegetable': {'hunger': 30, 'happiness': 5, 'health': 15},
+            'treat': {'hunger': 20, 'happiness': 30, 'health': 0},
+            'medicine': {'hunger': 0, 'happiness': -10, 'health': 50}
+        }
+        
+        effects = food_effects.get(food_type.lower(), food_effects['meat'])
+        
+        # Apply personality preferences
+        likes_food = food_type.lower() in [like.lower() for like in monster.personality.get('likes', [])]
+        dislikes_food = food_type.lower() in [dislike.lower() for dislike in monster.personality.get('dislikes', [])]
+        
+        if likes_food:
+            effects['happiness'] *= 2
+        elif dislikes_food:
+            effects['happiness'] = -abs(effects['happiness'])
+        
+        # Update care state
+        old_hunger = monster.care_state['hunger']
+        monster.care_state['hunger'] = min(100, monster.care_state['hunger'] + effects['hunger'])
+        monster.care_state['happiness'] = max(0, min(100, monster.care_state['happiness'] + effects['happiness']))
+        monster.care_state['health'] = min(100, monster.care_state['health'] + effects['health'])
+        
+        # Generate response
+        if likes_food:
+            message = f"😋 {monster.name} loves {food_type}! 💖"
+        elif dislikes_food:
+            message = f"😒 {monster.name} doesn't like {food_type}... 💔"
+        elif old_hunger < 30:
+            message = f"🍽️ {monster.name} was very hungry! Much better now! 😊"
+        else:
+            message = f"🍴 {monster.name} enjoyed the {food_type}! 👍"
+        
+        return {
+            'success': True,
+            'message': message,
+            'effects': effects,
+            'current_state': monster.care_state
+        }
+    
+    def update_care_state(self, monster: Monster, time_passed: timedelta) -> Dict[str, Any]:
+        """Update monster care state based on time passed"""
+        
+        # Calculate hours passed
+        hours = time_passed.total_seconds() / 3600
+        
+        # Decrease hunger and happiness over time
+        monster.care_state['hunger'] = max(0, monster.care_state['hunger'] - hours * 5)
+        monster.care_state['happiness'] = max(0, monster.care_state['happiness'] - hours * 2)
+        
+        # Decrease fatigue over time (rest)
+        monster.care_state['fatigue'] = max(0, monster.care_state['fatigue'] - hours * 10)
+        
+        # Health changes based on other stats
+        if monster.care_state['hunger'] < 20:
+            monster.care_state['health'] = max(0, monster.care_state['health'] - hours * 3)
+        elif monster.care_state['happiness'] < 20:
+            monster.care_state['health'] = max(0, monster.care_state['health'] - hours * 1)
+        
+        # Check for critical states
+        alerts = []
+        if monster.care_state['hunger'] < self.care_thresholds['hunger']['critical']:
+            alerts.append("🍖 Your monster is starving!")
+        if monster.care_state['happiness'] < self.care_thresholds['happiness']['critical']:
+            alerts.append("😢 Your monster is very unhappy!")
+        if monster.care_state['health'] < self.care_thresholds['health']['critical']:
+            alerts.append("🏥 Your monster needs medical attention!")
+        
+        return {
+            'updated_state': monster.care_state,
+            'alerts': alerts,
+            'time_since_update': str(time_passed)
+        }
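+
+
+# Smoke-test sketch (illustrative traits dict; not part of the game flow):
+if __name__ == "__main__":
+    gm = GameMechanics()
+    mon = gm.create_monster({'traits': {'element': 'fire', 'personality': 'brave'}})
+    print(gm.train_monster(mon, "Strength", 5)['message'])
+    print(gm.feed_monster(mon, "Meat")['message'])
+    print(json.dumps(mon.get_stats_display(), indent=2))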
\ No newline at end of file
diff --git a/core/state_manager.py b/core/state_manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee67cf407e907d9ca4952b0e5fab26d947e8d738
--- /dev/null
+++ b/core/state_manager.py
@@ -0,0 +1,280 @@
+import json
+import os
+from pathlib import Path
+from datetime import datetime, timedelta
+from typing import Dict, List, Optional, Any
+import shutil
+from core.game_mechanics import Monster
+
+class StateManager:
+    """Manages persistent state for users and monsters"""
+    
+    def __init__(self, data_dir: Path):
+        self.data_dir = Path(data_dir)
+        self.users_dir = self.data_dir / "users"
+        self.monsters_dir = self.data_dir / "monsters"
+        self.cache_dir = self.data_dir / "cache"
+        
+        # Create directories if they don't exist
+        for dir_path in [self.users_dir, self.monsters_dir, self.cache_dir]:
+            dir_path.mkdir(parents=True, exist_ok=True)
+        
+        # In-memory cache for active sessions
+        self.active_sessions = {}
+        self.last_save_time = {}
+    
+    def get_user_dir(self, user_id: str) -> Path:
+        """Get or create user directory"""
+        user_dir = self.users_dir / user_id
+        user_dir.mkdir(exist_ok=True)
+        return user_dir
+    
+    def save_monster(self, user_id: str, monster: Monster) -> bool:
+        """Save monster to persistent storage"""
+        try:
+            user_dir = self.get_user_dir(user_id)
+            
+            # Save monster data as a new timestamped snapshot (earlier snapshots are kept)
+            monster_file = user_dir / f"monster_{monster.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+            with open(monster_file, 'w') as f:
+                json.dump(monster.to_dict(), f, indent=2)
+            
+            # Update current monster reference
+            current_file = user_dir / "current_monster.json"
+            current_data = {
+                'monster_file': str(monster_file.name),
+                'monster_name': monster.name,
+                'last_updated': datetime.now().isoformat()
+            }
+            with open(current_file, 'w') as f:
+                json.dump(current_data, f, indent=2)
+            
+            # Update user profile
+            self._update_user_profile(user_id, monster)
+            
+            # Cache in memory
+            self.active_sessions[user_id] = {
+                'monster': monster,
+                'last_access': datetime.now()
+            }
+            
+            return True
+            
+        except Exception as e:
+            print(f"Error saving monster: {e}")
+            return False
+    
+    def get_current_monster(self, user_id: str) -> Optional[Monster]:
+        """Get the current active monster for a user"""
+        
+        # Check memory cache first
+        if user_id in self.active_sessions:
+            session = self.active_sessions[user_id]
+            if datetime.now() - session['last_access'] < timedelta(minutes=30):
+                session['last_access'] = datetime.now()
+                return session['monster']
+        
+        # Load from disk
+        try:
+            user_dir = self.get_user_dir(user_id)
+            current_file = user_dir / "current_monster.json"
+            
+            if not current_file.exists():
+                return None
+            
+            with open(current_file, 'r') as f:
+                current_data = json.load(f)
+            
+            monster_file = user_dir / current_data['monster_file']
+            if not monster_file.exists():
+                return None
+            
+            with open(monster_file, 'r') as f:
+                monster_data = json.load(f)
+            
+            monster = Monster.from_dict(monster_data)
+            
+            # Update cache
+            self.active_sessions[user_id] = {
+                'monster': monster,
+                'last_access': datetime.now()
+            }
+            
+            return monster
+            
+        except Exception as e:
+            print(f"Error loading monster: {e}")
+            return None
+    
+    def update_monster(self, user_id: str, monster: Monster) -> bool:
+        """Update existing monster data"""
+        
+        # Update in memory
+        if user_id in self.active_sessions:
+            self.active_sessions[user_id]['monster'] = monster
+            self.active_sessions[user_id]['last_access'] = datetime.now()
+        
+        # Persist periodically (every 5 minutes) or immediately on critical states
+        should_save = False
+        current_time = datetime.now()
+        
+        if user_id not in self.last_save_time:
+            should_save = True
+        else:
+            time_since_save = current_time - self.last_save_time[user_id]
+            if time_since_save > timedelta(minutes=5):
+                should_save = True
+        
+        # Always save on evolution or critical states
+        if monster.care_state['health'] < 30 or monster.care_state['hunger'] < 20:
+            should_save = True
+        
+        if should_save:
+            self.last_save_time[user_id] = current_time
+            return self.save_monster(user_id, monster)
+        
+        return True
+    
+    def get_user_monsters(self, user_id: str) -> List[Dict[str, Any]]:
+        """Get all monsters for a user"""
+        try:
+            user_dir = self.get_user_dir(user_id)
+            monsters = []
+            
+            # The glob pattern only matches snapshot files, so
+            # current_monster.json is never picked up here
+            for file_path in user_dir.glob("monster_*.json"):
+                with open(file_path, 'r') as f:
+                    monster_data = json.load(f)
+                monsters.append({
+                    'file': file_path.name,
+                    'name': monster_data.get('name'),
+                    'species': monster_data.get('species'),
+                    'stage': monster_data.get('stage'),
+                    'birth_time': monster_data.get('birth_time')
+                })
+            
+            # Sort by birth time (newest first); a missing birth time sorts last
+            monsters.sort(key=lambda x: x['birth_time'] or '', reverse=True)
+            return monsters
+            
+        except Exception as e:
+            print(f"Error getting user monsters: {e}")
+            return []
+    
+    def _update_user_profile(self, user_id: str, monster: Monster):
+        """Update user profile with monster statistics"""
+        try:
+            user_dir = self.get_user_dir(user_id)
+            profile_file = user_dir / "profile.json"
+            
+            # Load existing profile or create new
+            if profile_file.exists():
+                with open(profile_file, 'r') as f:
+                    profile = json.load(f)
+            else:
+                profile = {
+                    'user_id': user_id,
+                    'created': datetime.now().isoformat(),
+                    'monsters_created': 0,
+                    'total_training_sessions': 0,
+                    'achievements': []
+                }
+            
+            # Update statistics; count a monster only once, not on every
+            # periodic re-save of the same monster
+            if profile.get('current_monster') != monster.name:
+                profile['monsters_created'] = profile.get('monsters_created', 0) + 1
+            profile['last_active'] = datetime.now().isoformat()
+            profile['current_monster'] = monster.name
+            
+            # Check for achievements
+            new_achievements = self._check_achievements(profile, monster)
+            profile['achievements'].extend(new_achievements)
+            
+            # Save profile
+            with open(profile_file, 'w') as f:
+                json.dump(profile, f, indent=2)
+                
+        except Exception as e:
+            print(f"Error updating user profile: {e}")
+    
+    def _check_achievements(self, profile: Dict, monster: Monster) -> List[Dict[str, Any]]:
+        """Check for new achievements"""
+        achievements = []
+        current_achievements = {a['id'] for a in profile.get('achievements', [])}
+        
+        # First monster achievement
+        if profile['monsters_created'] == 1 and 'first_monster' not in current_achievements:
+            achievements.append({
+                'id': 'first_monster',
+                'name': 'Digital Pioneer',
+                'description': 'Created your first digital monster',
+                'icon': '🥇',
+                'unlocked': datetime.now().isoformat()
+            })
+        
+        # Multiple monsters achievement
+        if profile['monsters_created'] == 5 and 'monster_collector' not in current_achievements:
+            achievements.append({
+                'id': 'monster_collector',
+                'name': 'Monster Collector',
+                'description': 'Created 5 digital monsters',
+                'icon': '🏆',
+                'unlocked': datetime.now().isoformat()
+            })
+        
+        # Perfect care achievement
+        if all(monster.care_state[stat] >= 90 for stat in ['hunger', 'happiness', 'health']):
+            if 'perfect_care' not in current_achievements:
+                achievements.append({
+                    'id': 'perfect_care',
+                    'name': 'Perfect Caretaker',
+                    'description': 'Achieved perfect care status',
+                    'icon': '💖',
+                    'unlocked': datetime.now().isoformat()
+                })
+        
+        return achievements
+    
+    def get_user_profile(self, user_id: str) -> Optional[Dict[str, Any]]:
+        """Get user profile"""
+        try:
+            user_dir = self.get_user_dir(user_id)
+            profile_file = user_dir / "profile.json"
+            
+            if profile_file.exists():
+                with open(profile_file, 'r') as f:
+                    return json.load(f)
+            return None
+            
+        except Exception as e:
+            print(f"Error loading user profile: {e}")
+            return None
+    
+    def cleanup_old_sessions(self):
+        """Clean up old sessions from memory"""
+        current_time = datetime.now()
+        expired_users = []
+        
+        for user_id, session in self.active_sessions.items():
+            if current_time - session['last_access'] > timedelta(hours=1):
+                expired_users.append(user_id)
+        
+        for user_id in expired_users:
+            # Save before removing from cache
+            if 'monster' in self.active_sessions[user_id]:
+                self.save_monster(user_id, self.active_sessions[user_id]['monster'])
+            del self.active_sessions[user_id]
+    
+    def export_user_data(self, user_id: str) -> Optional[str]:
+        """Export all user data as a zip file"""
+        try:
+            user_dir = self.get_user_dir(user_id)
+            export_path = self.cache_dir / f"export_{user_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+            
+            # Create zip archive
+            shutil.make_archive(str(export_path), 'zip', user_dir)
+            
+            return f"{export_path}.zip"
+            
+        except Exception as e:
+            print(f"Error exporting user data: {e}")
+            return None
\ No newline at end of file
diff --git a/docs/HUNYUAN3D_INTEGRATION.md b/docs/HUNYUAN3D_INTEGRATION.md
deleted file mode 100644
index ab48bb9c83cffe36ab3699ab9b7dbf5b87fa4b52..0000000000000000000000000000000000000000
--- a/docs/HUNYUAN3D_INTEGRATION.md
+++ /dev/null
@@ -1,264 +0,0 @@
-# Hunyuan3D-2.1 Integration for DigiPal
-
-## Overview
-
-The Hunyuan3D pipeline integrates Tencent's state-of-the-art Hunyuan3D-2.1 model into DigiPal, providing advanced 3D generation capabilities for monster creation. This pipeline offers multiple generation modes, high-quality outputs, and seamless integration with the existing DigiPal monster system.
-
-## Features
-
-### Core Capabilities
-
-1. **Text-to-3D Generation**
-   - Generate 3D models from text descriptions
-   - Automatic concept image generation
-   - Two-stage process: shape generation + texture generation
-
-2. **Image-to-3D Generation**
-   - Convert single images to 3D models
-   - Automatic background removal
-   - Foreground ratio control for optimal results
-
-3. **Multi-View Generation**
-   - Generate from front, back, left, and right views
-   - Higher accuracy than single image input
-   - Ideal for complex monster designs
-
-### Generation Modes
-
-- **Turbo Mode**: Fastest generation, suitable for prototypes and baby monsters
-- **Fast Mode**: Balanced speed and quality, ideal for most use cases
-- **Standard Mode**: Best quality, recommended for final assets
-
-### Export Formats
-
-- **GLB** (recommended): GLTF binary format with embedded textures
-- **OBJ**: Wavefront format for compatibility
-- **PLY**: Point cloud format
-- **STL**: For 3D printing applications
-
-### Texture Options
-
-- **RGB**: Standard color textures
-- **PBR**: Physically-based rendering with metallic, roughness, and normal maps
-
-## Installation
-
-1. Ensure you have the required dependencies:
-```bash
-pip install gradio_client>=0.8.0 trimesh>=4.0.0 aiohttp>=3.9.0
-```
-
-2. The pipeline is located at: `src/pipelines/hunyuan3d_pipeline.py`
-
-## Configuration
-
-Create a configuration file or use the default settings:
-
-```python
-from src.pipelines.hunyuan3d_pipeline import Hunyuan3DConfig, GenerationMode, TextureMethod, ExportFormat
-
-config = Hunyuan3DConfig(
-    space_id="Tencent/Hunyuan3D-2",  # Official HF Space
-    default_mode=GenerationMode.FAST,
-    texture_method=TextureMethod.RGB,
-    export_format=ExportFormat.GLB,
-    target_polycount=30000,
-    enable_optimization=True
-)
-```
-
-## Usage Examples
-
-### Basic Text-to-3D Generation
-
-```python
-from src.pipelines.hunyuan3d_pipeline import Hunyuan3DPipeline, GenerationMode
-
-# Initialize pipeline
-pipeline = Hunyuan3DPipeline()
-
-# Generate from text
-result = await pipeline.generate_from_text(
-    prompt="cute blue dragon with big eyes and small wings",
-    name="BlueDragon",
-    mode=GenerationMode.FAST
-)
-
-if result["success"]:
-    print(f"Model saved at: {result['paths']['processed_model']}")
-```
-
-### Image-to-3D Generation
-
-```python
-# Generate from a single image
-result = await pipeline.generate_from_image(
-    image_path="dragon_concept.png",
-    name="DragonFromImage",
-    mode=GenerationMode.STANDARD
-)
-```
-
-### Multi-View Generation
-
-```python
-# Generate from multiple views for better accuracy
-views = {
-    "front": "dragon_front.png",
-    "back": "dragon_back.png",
-    "left": "dragon_left.png",
-    "right": "dragon_right.png"
-}
-
-result = await pipeline.generate_from_multi_view(
-    image_paths=views,
-    name="DragonMultiView",
-    mode=GenerationMode.STANDARD
-)
-```
-
-### DigiPal Monster Integration
-
-```python
-from src.pipelines.hunyuan3d_pipeline import DigiPalHunyuan3DIntegration
-
-# Initialize integration
-integration = DigiPalHunyuan3DIntegration()
-
-# Generate model for a DigiPal monster
-result = await integration.generate_monster_model(
-    monster=my_monster,  # DW1Monster instance
-    force_regenerate=False
-)
-```
-
-## Integration with DigiPal System
-
-### Automatic Monster Generation
-
-The pipeline automatically creates appropriate prompts based on monster attributes:
-
-- **Stage**: Determines model complexity (baby → ultimate)
-- **Personality**: Influences visual style and pose
-- **Species Type**: Affects color scheme and texture
-- **Stats**: High offense adds claws, high defense adds armor, etc.
-
-### Evolution Support
-
-Generate complete evolution sequences:
-
-```python
-# Generate all evolution stages
-results = await integration.generate_evolution_sequence(
-    monster=my_monster,
-    stages=["baby", "child", "adult", "perfect", "ultimate"]
-)
-```
-
-### Caching System
-
-- Models are automatically cached to avoid regeneration
-- Cache key based on monster ID and evolution stage
-- Force regeneration available when needed
-
-## Performance Optimization
-
-### Model Optimization
-
-- Automatic mesh simplification to target polycount
-- Geometry cleanup (degenerate faces, duplicates)
-- UV optimization for efficient texture mapping
-- Watertight mesh generation for rigging
-
-### Generation Speed
-
-- **Turbo Mode**: ~30-60 seconds
-- **Fast Mode**: ~1-2 minutes
-- **Standard Mode**: ~2-4 minutes
-
-### Batch Processing
-
-Generate multiple models concurrently:
-
-```python
-tasks = [
-    {"prompt": "fire dragon", "name": "FireDragon"},
-    {"prompt": "water turtle", "name": "WaterTurtle"},
-    {"prompt": "electric mouse", "name": "ElectricMouse"}
-]
-
-results = await pipeline.batch_generate(
-    tasks,
-    max_concurrent=2  # Process 2 at a time
-)
-```
-
-## Comparison with Other Pipelines
-
-| Feature | Hunyuan3D | Meshy AI | Open Source |
-|---------|-----------|----------|-------------|
-| Text-to-3D | ✓ | ✓ | ✓ |
-| Image-to-3D | ✓ | Limited | ✓ |
-| Multi-View | ✓ | ✗ | ✓ |
-| Speed | Fast | Medium | Slow |
-| Quality | High | High | Medium |
-| PBR Textures | ✓ | ✓ | Limited |
-| API Cost | Free* | Paid | Free |
-| Auto-Rigging | ✗ | ✓ | ✓ |
-
-*Free via Hugging Face Spaces, subject to usage limits
-
-## Best Practices
-
-### Prompt Engineering
-
-1. **Be Specific**: Include details about size, color, features
-2. **Use T-Pose**: Add "T-pose" for better rigging compatibility
-3. **Neutral Background**: Specify "neutral background" for cleaner results
-4. **Game Asset**: Include "game character" or "game asset" for optimization
-
-### Quality Settings
-
-- Use **Turbo** mode for rapid prototyping
-- Use **Fast** mode for development and testing
-- Use **Standard** mode for final production assets
-
-### Multi-View Tips
-
-- Ensure consistent lighting across all views
-- Use the same background for all images
-- Maintain the same scale and position
-- Remove shadows for better reconstruction
-
-## Troubleshooting
-
-### Common Issues
-
-1. **Connection Failed**: Check internet connection and HF Space availability
-2. **Generation Timeout**: Reduce quality settings or use Turbo mode
-3. **Low Quality Output**: Use Standard mode or provide better input images
-4. **Missing Textures**: Ensure texture_method is set correctly
-
-### Error Handling
-
-The pipeline includes comprehensive error handling:
-- Automatic retries with exponential backoff
-- Graceful fallbacks for failed generations
-- Detailed error messages in results
-
-## Future Enhancements
-
-- [ ] Animation generation support
-- [ ] Advanced rigging integration
-- [ ] Real-time preview during generation
-- [ ] Custom texture painting
-- [ ] Physics simulation setup
-- [ ] LOD (Level of Detail) generation
-
-## API Reference
-
-See the inline documentation in `src/pipelines/hunyuan3d_pipeline.py` for detailed API reference.
-
-## Credits
-
-This integration uses Tencent's Hunyuan3D-2.1 model, available through Hugging Face Spaces. Special thanks to the Tencent team for making this technology accessible.
\ No newline at end of file
diff --git a/frontend/app.html b/frontend/app.html
deleted file mode 100644
index 14f2c57f417de861d32878ba311f75af51d4795a..0000000000000000000000000000000000000000
--- a/frontend/app.html
+++ /dev/null
@@ -1,12 +0,0 @@
-<!doctype html>
-<html lang="en">
-  <head>
-    <meta charset="utf-8" />
-    <link rel="icon" href="%sveltekit.assets%/favicon.png" />
-    <meta name="viewport" content="width=device-width, initial-scale=1" />
-    %sveltekit.head%
-  </head>
-  <body data-sveltekit-preload-data="hover" class="bg-black text-white">
-    <div style="display: contents">%sveltekit.body%</div>
-  </body>
-</html>
\ No newline at end of file
diff --git a/frontend/frontend/.gitignore b/frontend/frontend/.gitignore
deleted file mode 100644
index 3b462cb0c4158547145078184d816bdcf314eaba..0000000000000000000000000000000000000000
--- a/frontend/frontend/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-node_modules
-
-# Output
-.output
-.vercel
-.netlify
-.wrangler
-/.svelte-kit
-/build
-
-# OS
-.DS_Store
-Thumbs.db
-
-# Env
-.env
-.env.*
-!.env.example
-!.env.test
-
-# Vite
-vite.config.js.timestamp-*
-vite.config.ts.timestamp-*
diff --git a/frontend/frontend/.npmrc b/frontend/frontend/.npmrc
deleted file mode 100644
index b6f27f135954640c8cc5bfd7b8c9922ca6eb2aad..0000000000000000000000000000000000000000
--- a/frontend/frontend/.npmrc
+++ /dev/null
@@ -1 +0,0 @@
-engine-strict=true
diff --git a/frontend/frontend/README.md b/frontend/frontend/README.md
deleted file mode 100644
index b5b295070b440f5eb2881ca213b1dc68a4e910ea..0000000000000000000000000000000000000000
--- a/frontend/frontend/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# sv
-
-Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli).
-
-## Creating a project
-
-If you're seeing this, you've probably already done this step. Congrats!
-
-```bash
-# create a new project in the current directory
-npx sv create
-
-# create a new project in my-app
-npx sv create my-app
-```
-
-## Developing
-
-Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
-
-```bash
-npm run dev
-
-# or start the server and open the app in a new browser tab
-npm run dev -- --open
-```
-
-## Building
-
-To create a production version of your app:
-
-```bash
-npm run build
-```
-
-You can preview the production build with `npm run preview`.
-
-> To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment.
diff --git a/frontend/frontend/package.json b/frontend/frontend/package.json
deleted file mode 100644
index 130a68198dc8769f5c9f6a96b66e0fbf976ec7f0..0000000000000000000000000000000000000000
--- a/frontend/frontend/package.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-	"name": "frontend",
-	"private": true,
-	"version": "0.0.1",
-	"type": "module",
-	"scripts": {
-		"dev": "vite dev",
-		"build": "vite build",
-		"preview": "vite preview",
-		"prepare": "svelte-kit sync || echo ''",
-		"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
-		"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
-	},
-	"devDependencies": {
-		"@sveltejs/adapter-auto": "^6.0.0",
-		"@sveltejs/kit": "^2.16.0",
-		"@sveltejs/vite-plugin-svelte": "^5.0.0",
-		"svelte": "^5.0.0",
-		"svelte-check": "^4.0.0",
-		"typescript": "^5.0.0",
-		"vite": "^6.2.6"
-	}
-}
diff --git a/frontend/frontend/src/app.d.ts b/frontend/frontend/src/app.d.ts
deleted file mode 100644
index da08e6da592d210d5cc574d8a629868eced88543..0000000000000000000000000000000000000000
--- a/frontend/frontend/src/app.d.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-// See https://svelte.dev/docs/kit/types#app.d.ts
-// for information about these interfaces
-declare global {
-	namespace App {
-		// interface Error {}
-		// interface Locals {}
-		// interface PageData {}
-		// interface PageState {}
-		// interface Platform {}
-	}
-}
-
-export {};
diff --git a/frontend/frontend/src/app.html b/frontend/frontend/src/app.html
deleted file mode 100644
index 77a5ff52c9239ef2a5c38ba452c659f49e64a7db..0000000000000000000000000000000000000000
--- a/frontend/frontend/src/app.html
+++ /dev/null
@@ -1,12 +0,0 @@
-<!doctype html>
-<html lang="en">
-	<head>
-		<meta charset="utf-8" />
-		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
-		<meta name="viewport" content="width=device-width, initial-scale=1" />
-		%sveltekit.head%
-	</head>
-	<body data-sveltekit-preload-data="hover">
-		<div style="display: contents">%sveltekit.body%</div>
-	</body>
-</html>
diff --git a/frontend/frontend/src/routes/+page.svelte b/frontend/frontend/src/routes/+page.svelte
deleted file mode 100644
index cc88df0ea3529d0fb891aa1e67f77b5228ade791..0000000000000000000000000000000000000000
--- a/frontend/frontend/src/routes/+page.svelte
+++ /dev/null
@@ -1,2 +0,0 @@
-<h1>Welcome to SvelteKit</h1>
-<p>Visit <a href="https://svelte.dev/docs/kit">svelte.dev/docs/kit</a> to read the documentation</p>
diff --git a/frontend/frontend/static/favicon.png b/frontend/frontend/static/favicon.png
deleted file mode 100644
index 825b9e65af7c104cfb07089bb28659393b4f2097..0000000000000000000000000000000000000000
Binary files a/frontend/frontend/static/favicon.png and /dev/null differ
diff --git a/frontend/frontend/svelte.config.js b/frontend/frontend/svelte.config.js
deleted file mode 100644
index 1295460d122f0bd1a34b530197ed691dd43c2fe0..0000000000000000000000000000000000000000
--- a/frontend/frontend/svelte.config.js
+++ /dev/null
@@ -1,18 +0,0 @@
-import adapter from '@sveltejs/adapter-auto';
-import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
-/** @type {import('@sveltejs/kit').Config} */
-const config = {
-	// Consult https://svelte.dev/docs/kit/integrations
-	// for more information about preprocessors
-	preprocess: vitePreprocess(),
-
-	kit: {
-		// adapter-auto only supports some environments, see https://svelte.dev/docs/kit/adapter-auto for a list.
-		// If your environment is not supported, or you settled on a specific environment, switch out the adapter.
-		// See https://svelte.dev/docs/kit/adapters for more information about adapters.
-		adapter: adapter()
-	}
-};
-
-export default config;
diff --git a/frontend/frontend/tsconfig.json b/frontend/frontend/tsconfig.json
deleted file mode 100644
index 0b2d8865f4efe30f84b6516a5de8d6bd6b927fb6..0000000000000000000000000000000000000000
--- a/frontend/frontend/tsconfig.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-	"extends": "./.svelte-kit/tsconfig.json",
-	"compilerOptions": {
-		"allowJs": true,
-		"checkJs": true,
-		"esModuleInterop": true,
-		"forceConsistentCasingInFileNames": true,
-		"resolveJsonModule": true,
-		"skipLibCheck": true,
-		"sourceMap": true,
-		"strict": true,
-		"moduleResolution": "bundler"
-	}
-	// Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias
-	// except $lib which is handled by https://svelte.dev/docs/kit/configuration#files
-	//
-	// If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes
-	// from the referenced tsconfig.json - TypeScript does not merge them in
-}
diff --git a/frontend/frontend/vite.config.ts b/frontend/frontend/vite.config.ts
deleted file mode 100644
index bbf8c7da43f0080dc6b9fb275f9583b7c17f1506..0000000000000000000000000000000000000000
--- a/frontend/frontend/vite.config.ts
+++ /dev/null
@@ -1,6 +0,0 @@
-import { sveltekit } from '@sveltejs/kit/vite';
-import { defineConfig } from 'vite';
-
-export default defineConfig({
-	plugins: [sveltekit()]
-});
diff --git a/frontend/package.json b/frontend/package.json
deleted file mode 100644
index 436531e3b72bc5b623aff0f45564f077372361f8..0000000000000000000000000000000000000000
--- a/frontend/package.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-  "name": "digipal-frontend",
-  "version": "1.0.0",
-  "private": true,
-  "scripts": {
-    "dev": "vite dev",
-    "build": "vite build",
-    "preview": "vite preview",
-    "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
-    "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
-  },
-  "devDependencies": {
-    "@sveltejs/adapter-auto": "^3.0.0",
-    "@sveltejs/kit": "^2.0.0",
-    "@sveltejs/vite-plugin-svelte": "^3.0.0",
-    "@types/three": "^0.160.0",
-    "svelte": "^4.2.7",
-    "svelte-check": "^3.6.0",
-    "tslib": "^2.4.1",
-    "typescript": "^5.0.0",
-    "vite": "^5.0.3"
-  },
-  "dependencies": {
-    "@threlte/core": "^7.1.0",
-    "@threlte/extras": "^8.7.5",
-    "three": "^0.160.1",
-    "tailwindcss": "^3.4.0",
-    "autoprefixer": "^10.4.16",
-    "postcss": "^8.4.32"
-  }
-}
\ No newline at end of file
diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js
deleted file mode 100644
index e99ebc2c0e00cc37de4cefee2bf2a332abb73d8d..0000000000000000000000000000000000000000
--- a/frontend/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-export default {
-  plugins: {
-    tailwindcss: {},
-    autoprefixer: {},
-  },
-}
\ No newline at end of file
diff --git a/frontend/src/app.css b/frontend/src/app.css
deleted file mode 100644
index b237d56551bb69c6788f98dbef62e960a76745f7..0000000000000000000000000000000000000000
--- a/frontend/src/app.css
+++ /dev/null
@@ -1,153 +0,0 @@
-@import url('https://fonts.googleapis.com/css2?family=Press+Start+2P&family=Inter:wght@400;500;600;700&display=swap');
-@tailwind base;
-@tailwind components;
-@tailwind utilities;
-
-@layer base {
-  body {
-    @apply bg-black text-white;
-  }
-}
-
-@layer components {
-  /* CRT effect */
-  .crt-effect {
-    position: relative;
-    overflow: hidden;
-  }
-  
-  .crt-effect::before {
-    content: " ";
-    display: block;
-    position: absolute;
-    top: 0;
-    left: 0;
-    bottom: 0;
-    right: 0;
-    background: linear-gradient(
-      rgba(18, 16, 16, 0) 50%,
-      rgba(0, 0, 0, 0.25) 50%
-    );
-    background-size: 100% 2px;
-    z-index: 2;
-    pointer-events: none;
-    animation: scan-lines 8s linear infinite;
-  }
-  
-  .crt-effect::after {
-    content: " ";
-    display: block;
-    position: absolute;
-    top: 0;
-    left: 0;
-    bottom: 0;
-    right: 0;
-    background: rgba(18, 16, 16, 0.1);
-    opacity: 0;
-    z-index: 2;
-    pointer-events: none;
-    animation: flicker 0.15s infinite;
-  }
-  
-  /* Holographic button */
-  .holographic-button {
-    @apply relative px-4 py-2 font-pixel text-xs uppercase tracking-wider;
-    background: linear-gradient(45deg, #00CED1, #FF6B00, #00CED1);
-    background-size: 200% 200%;
-    animation: hologram 3s ease-in-out infinite;
-    clip-path: polygon(10% 0%, 100% 0%, 90% 100%, 0% 100%);
-  }
-  
-  .holographic-button:hover {
-    @apply brightness-125;
-  }
-  
-  /* Device frame */
-  .device-frame {
-    @apply relative bg-digipal-gray rounded-3xl p-8 shadow-2xl;
-    background-image: 
-      radial-gradient(circle at 20% 80%, #FF6B00 0%, transparent 50%),
-      radial-gradient(circle at 80% 20%, #00CED1 0%, transparent 50%),
-      radial-gradient(circle at 40% 40%, #2D2D2D 0%, transparent 50%);
-  }
-  
-  /* D-pad button */
-  .dpad-button {
-    @apply bg-gray-800 hover:bg-gray-700 active:bg-gray-900 transition-colors;
-    @apply border-2 border-gray-600;
-  }
-  
-  /* Action button */
-  .action-button {
-    @apply rounded-full bg-gradient-to-br from-gray-700 to-gray-900;
-    @apply hover:from-gray-600 hover:to-gray-800 active:from-gray-800 active:to-black;
-    @apply transition-all duration-150 transform active:scale-95;
-    @apply shadow-lg;
-  }
-}
-
-@keyframes flicker {
-  0% {
-    opacity: 0.27861;
-  }
-  5% {
-    opacity: 0.34769;
-  }
-  10% {
-    opacity: 0.23604;
-  }
-  15% {
-    opacity: 0.90626;
-  }
-  20% {
-    opacity: 0.18128;
-  }
-  25% {
-    opacity: 0.83891;
-  }
-  30% {
-    opacity: 0.65583;
-  }
-  35% {
-    opacity: 0.67807;
-  }
-  40% {
-    opacity: 0.26559;
-  }
-  45% {
-    opacity: 0.84693;
-  }
-  50% {
-    opacity: 0.96019;
-  }
-  55% {
-    opacity: 0.08594;
-  }
-  60% {
-    opacity: 0.20313;
-  }
-  65% {
-    opacity: 0.71988;
-  }
-  70% {
-    opacity: 0.53455;
-  }
-  75% {
-    opacity: 0.37288;
-  }
-  80% {
-    opacity: 0.71428;
-  }
-  85% {
-    opacity: 0.70419;
-  }
-  90% {
-    opacity: 0.7003;
-  }
-  95% {
-    opacity: 0.36108;
-  }
-  100% {
-    opacity: 0.24387;
-  }
-}
\ No newline at end of file
diff --git a/frontend/src/routes/+layout.svelte b/frontend/src/routes/+layout.svelte
deleted file mode 100644
index 92f6606c60b01a25d3063c344bed89e3049a63c4..0000000000000000000000000000000000000000
--- a/frontend/src/routes/+layout.svelte
+++ /dev/null
@@ -1,5 +0,0 @@
-<script>
-  import '../app.css';
-</script>
-
-<slot />
\ No newline at end of file
diff --git a/frontend/src/routes/+page.svelte b/frontend/src/routes/+page.svelte
deleted file mode 100644
index f51f2615bedd1b8dcbaa3e535988e8e168a65dd1..0000000000000000000000000000000000000000
--- a/frontend/src/routes/+page.svelte
+++ /dev/null
@@ -1,14 +0,0 @@
-<script lang="ts">
-  import Device from '$lib/components/Device.svelte';
-  import { monsterStore } from '$lib/stores/monsterStore';
-  import { onMount } from 'svelte';
-  
-  onMount(() => {
-    // Initialize the app
-    monsterStore.initialize();
-  });
-</script>
-
-<main class="min-h-screen flex items-center justify-center bg-gradient-to-br from-gray-900 via-black to-gray-900">
-  <Device />
-</main>
\ No newline at end of file
diff --git a/frontend/svelte.config.js b/frontend/svelte.config.js
deleted file mode 100644
index 7e47852ca44b4d26ff54a3a0eb45c841e667c66b..0000000000000000000000000000000000000000
--- a/frontend/svelte.config.js
+++ /dev/null
@@ -1,14 +0,0 @@
-import adapter from '@sveltejs/adapter-auto';
-import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
-/** @type {import('@sveltejs/kit').Config} */
-const config = {
-  // Consult https://kit.svelte.dev/docs/integrations#preprocessors
-  preprocess: vitePreprocess(),
-
-  kit: {
-    adapter: adapter()
-  }
-};
-
-export default config;
\ No newline at end of file
diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js
deleted file mode 100644
index 133ec537c5f5a1bf64a70260ba6254bb7a3c6565..0000000000000000000000000000000000000000
--- a/frontend/tailwind.config.js
+++ /dev/null
@@ -1,58 +0,0 @@
-/** @type {import('tailwindcss').Config} */
-export default {
-  content: ['./src/**/*.{html,js,svelte,ts}'],
-  theme: {
-    extend: {
-      colors: {
-        'digipal-orange': '#FF6B00',
-        'digipal-teal': '#00CED1',
-        'digipal-gray': '#2D2D2D',
-        'neon-magenta': '#FF00FF',
-        'neon-cyan': '#00FFFF',
-      },
-      fontFamily: {
-        'pixel': ['Press Start 2P', 'monospace'],
-        'modern': ['Inter', 'sans-serif'],
-      },
-      animation: {
-        'scan-lines': 'scan-lines 8s linear infinite',
-        'glitch': 'glitch 2s ease-in-out infinite alternate',
-        'hologram': 'hologram 3s ease-in-out infinite',
-      },
-      keyframes: {
-        'scan-lines': {
-          '0%': { transform: 'translateY(0)' },
-          '100%': { transform: 'translateY(100%)' },
-        },
-        'glitch': {
-          '0%': { 
-            textShadow: '0.05em 0 0 #00fffc, -0.03em -0.04em 0 #fc00ff, 0.025em 0.04em 0 #fffc00' 
-          },
-          '15%': { 
-            textShadow: '0.05em 0 0 #00fffc, -0.03em -0.04em 0 #fc00ff, 0.025em 0.04em 0 #fffc00' 
-          },
-          '16%': { 
-            textShadow: '-0.05em -0.025em 0 #00fffc, 0.025em 0.035em 0 #fc00ff, -0.05em -0.05em 0 #fffc00' 
-          },
-          '49%': { 
-            textShadow: '-0.05em -0.025em 0 #00fffc, 0.025em 0.035em 0 #fc00ff, -0.05em -0.05em 0 #fffc00' 
-          },
-          '50%': { 
-            textShadow: '0.05em 0.035em 0 #00fffc, 0.03em 0 0 #fc00ff, 0 -0.04em 0 #fffc00' 
-          },
-          '99%': { 
-            textShadow: '0.05em 0.035em 0 #00fffc, 0.03em 0 0 #fc00ff, 0 -0.04em 0 #fffc00' 
-          },
-          '100%': { 
-            textShadow: '-0.05em 0 0 #00fffc, -0.025em -0.04em 0 #fc00ff, -0.04em -0.025em 0 #fffc00' 
-          },
-        },
-        'hologram': {
-          '0%, 100%': { opacity: '1' },
-          '50%': { opacity: '0.7' },
-        },
-      },
-    },
-  },
-  plugins: [],
-}
\ No newline at end of file
diff --git a/frontend/tsconfig.json b/frontend/tsconfig.json
deleted file mode 100644
index b5fca7fbce36acf1ef42956900ab8bea3d71ab52..0000000000000000000000000000000000000000
--- a/frontend/tsconfig.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "extends": "./.svelte-kit/tsconfig.json",
-  "compilerOptions": {
-    "allowJs": true,
-    "checkJs": true,
-    "esModuleInterop": true,
-    "forceConsistentCasingInFileNames": true,
-    "resolveJsonModule": true,
-    "skipLibCheck": true,
-    "sourceMap": true,
-    "strict": true
-  }
-}
\ No newline at end of file
diff --git a/frontend/vite.config.ts b/frontend/vite.config.ts
deleted file mode 100644
index 3cb1df6168710138169cec92ea49797c74674af3..0000000000000000000000000000000000000000
--- a/frontend/vite.config.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-import { sveltekit } from '@sveltejs/kit/vite';
-import { defineConfig } from 'vite';
-
-export default defineConfig({
-  plugins: [sveltekit()],
-  server: {
-    proxy: {
-      '/api': {
-        target: 'http://localhost:7861',
-        changeOrigin: true
-      },
-      '/api/ws': {
-        target: 'ws://localhost:7861',
-        ws: true
-      }
-    }
-  }
-});
\ No newline at end of file
diff --git a/game/__init__.py b/game/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1744cea3870c75b562561378c08aecedb645a9ad
--- /dev/null
+++ b/game/__init__.py
@@ -0,0 +1 @@
+# Game module initialization
\ No newline at end of file
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66fda43c5c2577171ef33f20a9ee493130702cf9
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1 @@
+# Model processors initialization
\ No newline at end of file
diff --git a/models/image_generator.py b/models/image_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfc9551198246382f5e9335d5d48a162e77229bb
--- /dev/null
+++ b/models/image_generator.py
@@ -0,0 +1,253 @@
+import torch
+from diffusers import DiffusionPipeline
+from PIL import Image
+import numpy as np
+from typing import Optional, List, Union
+import gc
+
+class OmniGenImageGenerator:
+    """Image generation using OmniGen2 model"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.pipeline = None
+        self.model_id = "OmniGen2/OmniGen2"  # Placeholder - actual model path may differ
+        
+        # Generation parameters
+        self.default_width = 512
+        self.default_height = 512
+        self.num_inference_steps = 30
+        self.guidance_scale = 7.5
+        
+        # Memory optimization
+        self.enable_attention_slicing = True
+        self.enable_vae_slicing = True
+        self.enable_cpu_offload = self.device == "cuda"
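+        # Note: sequential CPU offload trades speed for a much smaller VRAM
+        # footprint; attention/VAE slicing cut peak memory at a minor cost.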
+    
+    def load_model(self):
+        """Lazy load the image generation model"""
+        if self.pipeline is None:
+            try:
+                # Determine torch dtype
+                torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+                
+                # Load pipeline with optimizations
+                self.pipeline = DiffusionPipeline.from_pretrained(
+                    self.model_id,
+                    torch_dtype=torch_dtype,
+                    use_safetensors=True,
+                    variant="fp16" if self.device == "cuda" else None
+                )
+                
+                # Apply optimizations
+                if self.device == "cuda":
+                    if self.enable_cpu_offload:
+                        self.pipeline.enable_sequential_cpu_offload()
+                    else:
+                        self.pipeline = self.pipeline.to(self.device)
+                    
+                    if self.enable_attention_slicing:
+                        self.pipeline.enable_attention_slicing(1)
+                    
+                    if self.enable_vae_slicing:
+                        self.pipeline.enable_vae_slicing()
+                else:
+                    self.pipeline = self.pipeline.to(self.device)
+                
+                # Compile for faster inference (if available)
+                if hasattr(torch, 'compile') and self.device == "cuda":
+                    try:
+                        self.pipeline.unet = torch.compile(self.pipeline.unet, mode="reduce-overhead")
+                    except Exception:
+                        pass  # Compilation is optional; eager mode still works
+                
+            except Exception as e:
+                print(f"Failed to load image generation model: {e}")
+                # Fall back to Stable Diffusion; if that also fails, the
+                # exception propagates to the caller
+                self.model_id = "runwayml/stable-diffusion-v1-5"
+                self._load_fallback_model()
+    
+    def _load_fallback_model(self):
+        """Load fallback Stable Diffusion model"""
+        from diffusers import StableDiffusionPipeline
+        
+        torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+        
+        self.pipeline = StableDiffusionPipeline.from_pretrained(
+            self.model_id,
+            torch_dtype=torch_dtype,
+            use_safetensors=True
+        )
+        
+        if self.device == "cuda" and self.enable_cpu_offload:
+            self.pipeline.enable_sequential_cpu_offload()
+        else:
+            self.pipeline = self.pipeline.to(self.device)
+    
+    def generate(self, 
+                prompt: str,
+                reference_images: Optional[List[Union[str, Image.Image]]] = None,
+                negative_prompt: Optional[str] = None,
+                width: Optional[int] = None,
+                height: Optional[int] = None,
+                num_images: int = 1,
+                seed: Optional[int] = None) -> Union[Image.Image, List[Image.Image]]:
+        """Generate monster image from prompt"""
+        
+        try:
+            # Load model if needed
+            self.load_model()
+            
+            # Set dimensions
+            width = width or self.default_width
+            height = height or self.default_height
+            
+            # Ensure dimensions are multiples of 8
+            width = (width // 8) * 8
+            height = (height // 8) * 8
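+            # e.g. a requested 513x700 is rounded down to 512x696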
+            
+            # Enhance prompt for monster generation
+            enhanced_prompt = self._enhance_prompt(prompt)
+            
+            # Default negative prompt for quality
+            if negative_prompt is None:
+                negative_prompt = (
+                    "low quality, blurry, distorted, disfigured, "
+                    "bad anatomy, wrong proportions, ugly, duplicate, "
+                    "morbid, mutilated, extra limbs, malformed"
+                )
+            
+            # Set seed for reproducibility
+            generator = None
+            if seed is not None:
+                generator = torch.Generator(device=self.device).manual_seed(seed)
+            
+            # Generate images
+            with torch.no_grad():
+                if hasattr(self.pipeline, '__call__'):
+                    # Standard diffusion pipeline
+                    images = self.pipeline(
+                        prompt=enhanced_prompt,
+                        negative_prompt=negative_prompt,
+                        width=width,
+                        height=height,
+                        num_inference_steps=self.num_inference_steps,
+                        guidance_scale=self.guidance_scale,
+                        num_images_per_prompt=num_images,
+                        generator=generator
+                    ).images
+                else:
+                    # OmniGen specific generation (if different API)
+                    images = self._omnigen_generate(
+                        enhanced_prompt, 
+                        reference_images,
+                        width, 
+                        height, 
+                        num_images
+                    )
+            
+            # Clean up memory
+            if self.device == "cuda":
+                torch.cuda.empty_cache()
+            
+            # Return single image or list
+            if num_images == 1:
+                return images[0]
+            return images
+            
+        except Exception as e:
+            print(f"Image generation error: {e}")
+            # Return fallback image; width/height may still be None if the
+            # failure happened before defaults were applied
+            return self._generate_fallback_image(
+                width or self.default_width,
+                height or self.default_height
+            )
+    
+    def _enhance_prompt(self, base_prompt: str) -> str:
+        """Enhance prompt for better monster generation"""
+        enhancements = [
+            "digital art",
+            "creature design", 
+            "game character",
+            "detailed",
+            "vibrant colors",
+            "fantasy creature",
+            "high quality",
+            "professional artwork"
+        ]
+        
+        # Combine base prompt with enhancements
+        enhanced = f"{base_prompt}, {', '.join(enhancements)}"
+        
+        return enhanced
+    
+    def _omnigen_generate(self, prompt: str, reference_images: Optional[List], 
+                         width: int, height: int, num_images: int) -> List[Image.Image]:
+        """OmniGen specific generation with multimodal inputs"""
+        # This would be implemented based on OmniGen's specific API
+        # For now, fall back to standard generation
+        return self.pipeline(
+            prompt=prompt,
+            width=width,
+            height=height,
+            num_images_per_prompt=num_images
+        ).images
+    
+    def _generate_fallback_image(self, width: int, height: int) -> Image.Image:
+        """Generate a fallback monster image"""
+        # Create a simple procedural monster image
+        img_array = np.zeros((height, width, 3), dtype=np.uint8)
+        
+        # Add some basic shapes and colors
+        center_x, center_y = width // 2, height // 2
+        radius = min(width, height) // 3
+        
+        # Create circular body
+        y, x = np.ogrid[:height, :width]
+        mask = (x - center_x)**2 + (y - center_y)**2 <= radius**2
+        
+        # Random monster color
+        color = np.random.randint(50, 200, size=3)
+        img_array[mask] = color
+        
+        # Add eyes
+        eye_y = center_y - radius // 3
+        eye_left_x = center_x - radius // 3
+        eye_right_x = center_x + radius // 3
+        eye_radius = radius // 8
+        
+        # Left eye
+        eye_mask = (x - eye_left_x)**2 + (y - eye_y)**2 <= eye_radius**2
+        img_array[eye_mask] = [255, 255, 255]
+        
+        # Right eye  
+        eye_mask = (x - eye_right_x)**2 + (y - eye_y)**2 <= eye_radius**2
+        img_array[eye_mask] = [255, 255, 255]
+        
+        # Convert to PIL Image
+        return Image.fromarray(img_array)
+    
+    def edit_image(self, 
+                  image: Union[str, Image.Image],
+                  prompt: str,
+                  mask: Optional[Union[str, Image.Image]] = None) -> Image.Image:
+        """Edit existing image (for future monster customization)"""
+        # This would implement image editing capabilities
+        raise NotImplementedError("Image editing not yet implemented")
+    
+    def to(self, device: str):
+        """Move pipeline to specified device"""
+        self.device = device
+        if self.pipeline:
+            if device == "cuda" and self.enable_cpu_offload:
+                self.pipeline.enable_sequential_cpu_offload()
+            else:
+                self.pipeline = self.pipeline.to(device)
+    
+    def __del__(self):
+        """Cleanup when object is destroyed"""
+        if self.pipeline:
+            del self.pipeline
+        gc.collect()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
\ No newline at end of file
diff --git a/models/model_3d_generator.py b/models/model_3d_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b9f0ea798e7b29a91ab771447d94047f11ede8d
--- /dev/null
+++ b/models/model_3d_generator.py
@@ -0,0 +1,283 @@
+import torch
+import numpy as np
+from PIL import Image
+import trimesh
+import tempfile
+from typing import Union, Optional, Dict, Any
+from pathlib import Path
+import os
+
+class Hunyuan3DGenerator:
+    """3D model generation using Hunyuan3D-2.1"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.model = None
+        self.preprocessor = None
+        
+        # Model configuration
+        self.model_id = "tencent/Hunyuan3D-2.1"
+        self.lite_model_id = "tencent/Hunyuan3D-2.1-Lite"  # For low VRAM
+        
+        # Generation parameters
+        self.num_inference_steps = 50
+        self.guidance_scale = 7.5
+        self.resolution = 256  # 3D resolution
+        
+        # Use lite model for low VRAM
+        self.use_lite = self.device == "cpu" or not self._check_vram()
+    
+    def _check_vram(self) -> bool:
+        """Check if we have enough VRAM for full model"""
+        if not torch.cuda.is_available():
+            return False
+        
+        try:
+            vram = torch.cuda.get_device_properties(0).total_memory
+            # Need at least 12GB for full model
+            return vram > 12 * 1024 * 1024 * 1024
+        except Exception:
+            return False
+    
+    def load_model(self):
+        """Lazy load the 3D generation model"""
+        if self.model is None:
+            try:
+                # Import Hunyuan3D components
+                from transformers import AutoModel, AutoProcessor
+                
+                model_id = self.lite_model_id if self.use_lite else self.model_id
+                
+                # Load preprocessor
+                self.preprocessor = AutoProcessor.from_pretrained(model_id)
+                
+                # Load model with optimizations
+                torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+                
+                self.model = AutoModel.from_pretrained(
+                    model_id,
+                    torch_dtype=torch_dtype,
+                    low_cpu_mem_usage=True,
+                    device_map="auto" if self.device == "cuda" else None
+                )
+                
+                if self.device == "cpu":
+                    self.model = self.model.to(self.device)
+                
+                # Enable optimizations
+                if hasattr(self.model, 'enable_attention_slicing'):
+                    self.model.enable_attention_slicing()
+                
+            except Exception as e:
+                print(f"Failed to load Hunyuan3D model: {e}")
+                # Model loading failed, will use fallback
+                self.model = "fallback"
+    
+    def image_to_3d(self, 
+                   image: Union[str, Image.Image, np.ndarray],
+                   remove_background: bool = True,
+                   texture_resolution: int = 1024) -> str:
+        """Convert a 2D image to a 3D model and return the exported mesh path"""
+        
+        try:
+            # Load model if needed
+            if self.model is None:
+                self.load_model()
+            
+            # If model loading failed, use fallback
+            if self.model == "fallback":
+                return self._generate_fallback_3d(image)
+            
+            # Prepare image
+            if isinstance(image, str):
+                image = Image.open(image)
+            elif isinstance(image, np.ndarray):
+                image = Image.fromarray(image)
+            
+            # Ensure RGB
+            if image.mode != 'RGB':
+                image = image.convert('RGB')
+            
+            # Resize for processing
+            image = image.resize((512, 512), Image.Resampling.LANCZOS)
+            
+            # Remove background if requested
+            if remove_background:
+                image = self._remove_background(image)
+            
+            # Process with model
+            with torch.no_grad():
+                # Preprocess image
+                inputs = self.preprocessor(images=image, return_tensors="pt").to(self.device)
+                
+                # Generate 3D
+                outputs = self.model.generate(
+                    **inputs,
+                    num_inference_steps=self.num_inference_steps,
+                    guidance_scale=self.guidance_scale,
+                    texture_resolution=texture_resolution
+                )
+                
+                # Extract mesh
+                mesh = self._extract_mesh(outputs)
+            
+            # Save mesh
+            mesh_path = self._save_mesh(mesh)
+            
+            return mesh_path
+            
+        except Exception as e:
+            print(f"3D generation error: {e}")
+            return self._generate_fallback_3d(image)
+    
+    def _remove_background(self, image: Image.Image) -> Image.Image:
+        """Remove background from image"""
+        try:
+            # Try using rembg if available
+            from rembg import remove
+            return remove(image)
+        except Exception:
+            # Fallback: simple background removal
+            # Convert to RGBA
+            image = image.convert("RGBA")
+            
+            # Simple white background removal
+            datas = image.getdata()
+            new_data = []
+            
+            for item in datas:
+                # Remove white-ish backgrounds
+                if item[0] > 230 and item[1] > 230 and item[2] > 230:
+                    new_data.append((255, 255, 255, 0))
+                else:
+                    new_data.append(item)
+            
+            image.putdata(new_data)
+            return image
+    
+    def _extract_mesh(self, model_outputs: Dict[str, Any]) -> trimesh.Trimesh:
+        """Extract mesh from model outputs"""
+        # This would depend on actual Hunyuan3D output format
+        # Placeholder implementation
+        
+        if 'vertices' in model_outputs and 'faces' in model_outputs:
+            vertices = model_outputs['vertices'].cpu().numpy()
+            faces = model_outputs['faces'].cpu().numpy()
+            
+            # Create trimesh object
+            mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+            
+            # Add texture if available
+            if 'texture' in model_outputs:
+                # Apply texture to mesh
+                pass
+            
+            return mesh
+        else:
+            # Create a simple mesh if outputs are different
+            return self._create_simple_mesh()
+    
+    def _create_simple_mesh(self) -> trimesh.Trimesh:
+        """Create a simple placeholder mesh"""
+        # Create a simple sphere as placeholder
+        mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
+        
+        # Add some variation
+        mesh.vertices += np.random.normal(0, 0.05, mesh.vertices.shape)
+        
+        # Smooth the mesh
+        mesh = mesh.smoothed()
+        
+        return mesh
+    
+    def _generate_fallback_3d(self, image: Union[str, Image.Image, np.ndarray]) -> str:
+        """Generate fallback 3D model when main model fails"""
+        
+        # Create a simple 3D representation based on image
+        if isinstance(image, np.ndarray):
+            image = Image.fromarray(image)
+        elif isinstance(image, str):
+            image = Image.open(image)
+        
+        # Analyze image for basic shape (force RGB so the channel mean
+        # ignores any alpha channel)
+        image_array = np.array(image.convert('RGB').resize((64, 64)))
+        
+        # Create height map from image brightness
+        gray = np.mean(image_array, axis=2)
+        height_map = gray / 255.0
+        
+        # Create mesh from height map
+        mesh = self._heightmap_to_mesh(height_map)
+        
+        # Save and return path
+        return self._save_mesh(mesh)
+    
+    def _heightmap_to_mesh(self, heightmap: np.ndarray) -> trimesh.Trimesh:
+        """Convert heightmap to 3D mesh"""
+        h, w = heightmap.shape
+        
+        # Create vertices
+        vertices = []
+        faces = []
+        
+        # Create vertex grid
+        for i in range(h):
+            for j in range(w):
+                x = (j - w/2) / w * 2
+                y = (i - h/2) / h * 2
+                z = heightmap[i, j] * 0.5
+                vertices.append([x, y, z])
+        
+        # Create faces
+        for i in range(h-1):
+            for j in range(w-1):
+                # Two triangles per grid square
+                v1 = i * w + j
+                v2 = v1 + 1
+                v3 = v1 + w
+                v4 = v3 + 1
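+                # Vertex layout for this grid square (v1/v2 on row i,
+                # v3/v4 on row i+1):
+                #   v1 -- v2
+                #   |   /  |
+                #   v3 -- v4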
+                
+                faces.append([v1, v2, v3])
+                faces.append([v2, v4, v3])
+        
+        vertices = np.array(vertices)
+        faces = np.array(faces)
+        
+        # Create mesh
+        mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+        
+        # Apply smoothing
+        mesh = mesh.smoothed()
+        
+        return mesh
+    
+    def _save_mesh(self, mesh: trimesh.Trimesh) -> str:
+        """Save mesh to file"""
+        # Create temporary file
+        with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
+            mesh_path = tmp.name
+        
+        # Export mesh
+        mesh.export(mesh_path)
+        
+        return mesh_path
+    
+    def text_to_3d(self, text_prompt: str) -> str:
+        """Generate 3D model from text description"""
+        # First generate image, then convert to 3D
+        # This would require image generator integration
+        raise NotImplementedError("Text to 3D requires image generation first")
+    
+    def to(self, device: str):
+        """Move model to specified device"""
+        self.device = device
+        if self.model and self.model != "fallback":
+            self.model.to(device)
+    
+    def __del__(self):
+        """Cleanup when object is destroyed"""
+        if self.model and self.model != "fallback":
+            del self.model
+        if self.preprocessor:
+            del self.preprocessor
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
\ No newline at end of file
diff --git a/models/rigging_processor.py b/models/rigging_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..0747e8e0296f76525b50ac5145fa2a03fe552774
--- /dev/null
+++ b/models/rigging_processor.py
@@ -0,0 +1,546 @@
+import numpy as np
+import trimesh
+from typing import Union, Dict, List, Tuple, Optional, Any
+import tempfile
+from pathlib import Path
+
+class UniRigProcessor:
+    """Automatic rigging for 3D models using simplified UniRig approach"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device
+        self.model = None
+        
+        # Rigging parameters
+        self.bone_detection_threshold = 0.1
+        self.max_bones = 20
+        self.min_bones = 5
+        
+        # Animation presets for monsters
+        self.animation_presets = {
+            'idle': self._create_idle_animation,
+            'walk': self._create_walk_animation,
+            'attack': self._create_attack_animation,
+            'happy': self._create_happy_animation
+        }
+    
+    def load_model(self):
+        """Load rigging model (placeholder for actual implementation)"""
+        # In production, this would load the actual UniRig model
+        # For now, we'll use procedural rigging
+        self.model = "procedural"
+    
+    def rig_mesh(self,
+                 mesh: Union[str, trimesh.Trimesh],
+                 mesh_type: str = "monster") -> str:
+        """Add rigging to a 3D mesh and return the path to the exported file"""
+        
+        try:
+            # Load mesh if path provided
+            if isinstance(mesh, str):
+                mesh = trimesh.load(mesh)
+            
+            # Ensure model is loaded
+            if self.model is None:
+                self.load_model()
+            
+            # Analyze mesh structure
+            mesh_analysis = self._analyze_mesh(mesh)
+            
+            # Generate skeleton
+            skeleton = self._generate_skeleton(mesh, mesh_analysis)
+            
+            # Compute bone weights
+            weights = self._compute_bone_weights(mesh, skeleton)
+            
+            # Create rigged model
+            rigged_model = {
+                'mesh': mesh,
+                'skeleton': skeleton,
+                'weights': weights,
+                'animations': self._create_default_animations(skeleton),
+                'metadata': {
+                    'mesh_type': mesh_type,
+                    'bone_count': len(skeleton['bones']),
+                    'vertex_count': len(mesh.vertices)
+                }
+            }
+            
+            # Save rigged model
+            output_path = self._save_rigged_model(rigged_model)
+            
+            return output_path
+            
+        except Exception as e:
+            print(f"Rigging error: {e}")
+            # Return original mesh if rigging fails
+            return self._save_mesh_without_rigging(mesh)
+    
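+    # Example usage (a minimal sketch; "monster.glb" is a hypothetical
+    # input path, not a file shipped with this repo):
+    #
+    #     rig = UniRigProcessor(device="cpu")
+    #     rigged_path = rig.rig_mesh("monster.glb", mesh_type="monster")
+    #     print(rigged_path)  # filesystem path to the exported .glb
+    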
+    def _analyze_mesh(self, mesh: trimesh.Trimesh) -> Dict[str, Any]:
+        """Analyze mesh structure for rigging"""
+        
+        # Get mesh bounds and center
+        bounds = mesh.bounds
+        center = mesh.centroid
+        
+        # Analyze mesh topology
+        analysis = {
+            'bounds': bounds,
+            'center': center,
+            'height': bounds[1][2] - bounds[0][2],
+            'width': bounds[1][0] - bounds[0][0],
+            'depth': bounds[1][1] - bounds[0][1],
+            'is_symmetric': self._check_symmetry(mesh),
+            'detected_limbs': self._detect_limbs(mesh),
+            'mesh_type': self._classify_mesh_type(mesh)
+        }
+        
+        return analysis
+    
+    def _check_symmetry(self, mesh: trimesh.Trimesh) -> bool:
+        """Check if mesh is roughly symmetric"""
+        # Simple check: compare left and right halves
+        vertices = mesh.vertices
+        center_x = mesh.centroid[0]
+        
+        left_verts = vertices[vertices[:, 0] < center_x]
+        right_verts = vertices[vertices[:, 0] > center_x]
+        
+        # Check if similar number of vertices on each side
+        ratio = len(left_verts) / (len(right_verts) + 1)
+        return 0.8 < ratio < 1.2
+    
+    def _detect_limbs(self, mesh: trimesh.Trimesh) -> List[Dict]:
+        """Detect potential limbs in the mesh"""
+        # Simplified limb detection using vertex clustering
+        from sklearn.cluster import DBSCAN
+        
+        limbs = []
+        
+        try:
+            # Cluster vertices to find distinct parts
+            clustering = DBSCAN(eps=0.1, min_samples=10).fit(mesh.vertices)
+            
+            # Analyze each cluster
+            for label in set(clustering.labels_):
+                if label == -1:  # Noise
+                    continue
+                
+                cluster_verts = mesh.vertices[clustering.labels_ == label]
+                
+                # Check if cluster could be a limb
+                cluster_bounds = np.array([cluster_verts.min(axis=0), cluster_verts.max(axis=0)])
+                dimensions = cluster_bounds[1] - cluster_bounds[0]
+                
+                # Limbs are typically elongated
+                if max(dimensions) / (min(dimensions) + 0.001) > 2:
+                    limbs.append({
+                        'center': cluster_verts.mean(axis=0),
+                        'direction': dimensions,
+                        'size': len(cluster_verts)
+                    })
+        except Exception:
+            # Fallback if clustering fails
+            pass
+        
+        return limbs
+    
+    def _classify_mesh_type(self, mesh: trimesh.Trimesh) -> str:
+        """Classify the type of creature mesh"""
+        analysis = {
+            'height': mesh.bounds[1][2] - mesh.bounds[0][2],
+            'width': mesh.bounds[1][0] - mesh.bounds[0][0],
+            'depth': mesh.bounds[1][1] - mesh.bounds[0][1]
+        }
+        
+        # Simple classification based on proportions
+        aspect_ratio = analysis['height'] / max(analysis['width'], analysis['depth'])
+        
+        if aspect_ratio > 1.5:
+            return 'bipedal'  # Tall creatures
+        elif aspect_ratio < 0.7:
+            return 'quadruped'  # Wide creatures
+        else:
+            return 'hybrid'  # Mixed proportions
+    
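+    # Example (comment only): a mesh 2.0 units tall and 1.0 wide and deep
+    # gives aspect_ratio = 2.0 > 1.5 -> 'bipedal'; one 0.5 tall and 1.0
+    # wide gives aspect_ratio = 0.5 < 0.7 -> 'quadruped'.
+    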
+    def _generate_skeleton(self, mesh: trimesh.Trimesh, analysis: Dict) -> Dict:
+        """Generate skeleton for the mesh"""
+        
+        skeleton = {
+            'bones': [],
+            'hierarchy': {},
+            'bind_poses': []
+        }
+        
+        # Create root bone at center
+        root_pos = analysis['center']
+        root_bone = {
+            'id': 0,
+            'name': 'root',
+            'position': root_pos,
+            'parent': -1,
+            'children': []
+        }
+        skeleton['bones'].append(root_bone)
+        
+        # Generate bones based on mesh type
+        mesh_type = analysis['mesh_type']
+        
+        if mesh_type == 'bipedal':
+            skeleton = self._generate_bipedal_skeleton(mesh, skeleton, analysis)
+        elif mesh_type == 'quadruped':
+            skeleton = self._generate_quadruped_skeleton(mesh, skeleton, analysis)
+        else:
+            skeleton = self._generate_hybrid_skeleton(mesh, skeleton, analysis)
+        
+        # Build hierarchy
+        for bone in skeleton['bones']:
+            if bone['parent'] >= 0:
+                skeleton['bones'][bone['parent']]['children'].append(bone['id'])
+        
+        return skeleton
+    
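+    # Resulting bipedal hierarchy (comment only), as built by the helper
+    # below: root -> hips -> chest -> head, with left/right shoulder ->
+    # hand chains hanging off the chest and left/right leg -> foot chains
+    # hanging off the hips.
+    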
+    def _generate_bipedal_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
+        """Generate skeleton for bipedal creature"""
+        
+        bounds = analysis['bounds']
+        center = analysis['center']
+        height = analysis['height']
+        
+        # Spine bones
+        spine_positions = [
+            center + [0, 0, -height * 0.4],  # Hips
+            center + [0, 0, 0],               # Chest
+            center + [0, 0, height * 0.3]     # Head
+        ]
+        
+        parent_id = 0
+        for i, pos in enumerate(spine_positions):
+            bone = {
+                'id': len(skeleton['bones']),
+                'name': ['hips', 'chest', 'head'][i],
+                'position': pos,
+                'parent': parent_id,
+                'children': []
+            }
+            skeleton['bones'].append(bone)
+            parent_id = bone['id']
+        
+        # Add limbs
+        chest_id = skeleton['bones'][2]['id']  # Chest bone
+        hips_id = skeleton['bones'][1]['id']   # Hips bone
+        
+        # Arms
+        arm_offset = analysis['width'] * 0.4
+        for side, offset in [('left', -arm_offset), ('right', arm_offset)]:
+            shoulder_pos = skeleton['bones'][chest_id]['position'] + [offset, 0, 0]
+            elbow_pos = shoulder_pos + [offset * 0.5, 0, -height * 0.2]
+            
+            # Shoulder
+            shoulder = {
+                'id': len(skeleton['bones']),
+                'name': f'{side}_shoulder',
+                'position': shoulder_pos,
+                'parent': chest_id,
+                'children': []
+            }
+            skeleton['bones'].append(shoulder)
+            
+            # Elbow/Hand
+            hand = {
+                'id': len(skeleton['bones']),
+                'name': f'{side}_hand',
+                'position': elbow_pos,
+                'parent': shoulder['id'],
+                'children': []
+            }
+            skeleton['bones'].append(hand)
+        
+        # Legs
+        for side, offset in [('left', -arm_offset * 0.5), ('right', arm_offset * 0.5)]:
+            hip_pos = skeleton['bones'][hips_id]['position'] + [offset, 0, 0]
+            foot_pos = hip_pos + [0, 0, -height * 0.4]
+            
+            # Leg
+            leg = {
+                'id': len(skeleton['bones']),
+                'name': f'{side}_leg',
+                'position': hip_pos,
+                'parent': hips_id,
+                'children': []
+            }
+            skeleton['bones'].append(leg)
+            
+            # Foot
+            foot = {
+                'id': len(skeleton['bones']),
+                'name': f'{side}_foot',
+                'position': foot_pos,
+                'parent': leg['id'],
+                'children': []
+            }
+            skeleton['bones'].append(foot)
+        
+        return skeleton
+    
+    def _generate_quadruped_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
+        """Generate skeleton for quadruped creature"""
+        
+        # Similar to bipedal but with 4 legs and horizontal spine
+        center = analysis['center']
+        width = analysis['width']
+        depth = analysis['depth']
+        
+        # Spine (horizontal): the body attaches to the root, and the tail
+        # and head both attach to the body
+        body_bone = {
+            'id': len(skeleton['bones']),
+            'name': 'body',
+            'position': center,
+            'parent': 0,
+            'children': []
+        }
+        skeleton['bones'].append(body_bone)
+        
+        for name, pos in [('tail', center + [-width * 0.3, 0, 0]),
+                          ('head', center + [width * 0.3, 0, 0])]:
+            bone = {
+                'id': len(skeleton['bones']),
+                'name': name,
+                'position': pos,
+                'parent': body_bone['id'],
+                'children': []
+            }
+            skeleton['bones'].append(bone)
+        
+        # Add 4 legs
+        body_id = skeleton['bones'][1]['id']
+        
+        for front_back, x_offset in [('front', width * 0.2), ('back', -width * 0.2)]:
+            for side, y_offset in [('left', -depth * 0.3), ('right', depth * 0.3)]:
+                # z is the vertical axis in this codebase, so legs drop along z
+                leg_pos = skeleton['bones'][body_id]['position'] + [x_offset, y_offset, -analysis['height'] * 0.3]
+                
+                leg = {
+                    'id': len(skeleton['bones']),
+                    'name': f'{front_back}_{side}_leg',
+                    'position': leg_pos,
+                    'parent': body_id,
+                    'children': []
+                }
+                skeleton['bones'].append(leg)
+        
+        return skeleton
+    
+    def _generate_hybrid_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
+        """Generate skeleton for hybrid creature"""
+        # Mix of bipedal and quadruped features
+        # For simplicity, use bipedal as base
+        return self._generate_bipedal_skeleton(mesh, skeleton, analysis)
+    
+    def _compute_bone_weights(self, mesh: trimesh.Trimesh, skeleton: Dict) -> np.ndarray:
+        """Compute bone weights for vertices"""
+        
+        num_vertices = len(mesh.vertices)
+        num_bones = len(skeleton['bones'])
+        
+        # Initialize weights matrix
+        weights = np.zeros((num_vertices, num_bones))
+        
+        # For each vertex, compute influence from each bone
+        for v_idx, vertex in enumerate(mesh.vertices):
+            total_weight = 0
+            
+            for b_idx, bone in enumerate(skeleton['bones']):
+                # Distance-based weight
+                distance = np.linalg.norm(vertex - bone['position'])
+                
+                # Inverse distance weight with falloff
+                weight = 1.0 / (distance + 0.1)
+                weights[v_idx, b_idx] = weight
+                total_weight += weight
+            
+            # Normalize weights
+            if total_weight > 0:
+                weights[v_idx] /= total_weight
+            
+            # Keep only top 4 influences per vertex (standard for game engines)
+            top_4 = np.argsort(weights[v_idx])[-4:]
+            mask = np.zeros(num_bones, dtype=bool)
+            mask[top_4] = True
+            weights[v_idx, ~mask] = 0
+            
+            # Re-normalize
+            if weights[v_idx].sum() > 0:
+                weights[v_idx] /= weights[v_idx].sum()
+        
+        return weights
+    
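+    # Worked example (comment only): with two bones at distances 0.4 and
+    # 0.9 from a vertex, the raw weights are 1/(0.4 + 0.1) = 2.0 and
+    # 1/(0.9 + 0.1) = 1.0, which normalize to ~0.67 and ~0.33; with more
+    # than four bones, all but the top four influences are zeroed and the
+    # remainder renormalized, matching the usual 4-influence limit in
+    # game engines.
+    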
+    def _create_default_animations(self, skeleton: Dict) -> Dict[str, List]:
+        """Create default animations for the skeleton"""
+        
+        animations = {}
+        
+        # Create basic animation sets
+        for anim_name, anim_func in self.animation_presets.items():
+            animations[anim_name] = anim_func(skeleton)
+        
+        return animations
+    
+    def _create_idle_animation(self, skeleton: Dict) -> List[Dict]:
+        """Create idle animation keyframes"""
+        
+        keyframes = []
+        
+        # Simple breathing/bobbing motion
+        for t in np.linspace(0, 2 * np.pi, 30):
+            frame = {
+                'time': t / (2 * np.pi),
+                'bones': {}
+            }
+            
+            # Subtle movement for each bone
+            for bone in skeleton['bones']:
+                if 'chest' in bone['name'] or 'body' in bone['name']:
+                    # Breathing motion
+                    offset = np.sin(t) * 0.02
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'] + [0, offset, 0],
+                        'rotation': [0, 0, 0, 1]  # Identity quaternion (x, y, z, w)
+                    }
+                else:
+                    # No movement
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'],
+                        'rotation': [0, 0, 0, 1]
+                    }
+            
+            keyframes.append(frame)
+        
+        return keyframes
+    
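+    # Keyframe shape (comment only): each frame looks roughly like
+    #
+    #     {'time': 0.25,
+    #      'bones': {0: {'position': array([...]), 'rotation': [0, 0, 0, 1]},
+    #                ...}}
+    #
+    # with 'time' normalized to [0, 1] over one loop of the animation.
+    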
+    def _create_walk_animation(self, skeleton: Dict) -> List[Dict]:
+        """Create walk animation keyframes"""
+        # Simplified walk cycle
+        keyframes = []
+        
+        for t in np.linspace(0, 2 * np.pi, 60):
+            frame = {
+                'time': t / (2 * np.pi),
+                'bones': {}
+            }
+            
+            # Animate legs with sine waves
+            for bone in skeleton['bones']:
+                if 'leg' in bone['name'] or 'foot' in bone['name']:
+                    # Alternating leg movement
+                    phase = 0 if 'left' in bone['name'] else np.pi
+                    offset = np.sin(t + phase) * 0.1
+                    
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'] + [offset, 0, 0],
+                        'rotation': [0, 0, 0, 1]
+                    }
+                else:
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'],
+                        'rotation': [0, 0, 0, 1]
+                    }
+            
+            keyframes.append(frame)
+        
+        return keyframes
+    
+    def _create_attack_animation(self, skeleton: Dict) -> List[Dict]:
+        """Create attack animation keyframes"""
+        # Quick strike motion
+        keyframes = []
+        
+        # Wind up
+        for t in np.linspace(0, 0.3, 10):
+            frame = {'time': t, 'bones': {}}
+            for bone in skeleton['bones']:
+                frame['bones'][bone['id']] = {
+                    'position': bone['position'],
+                    'rotation': [0, 0, 0, 1]
+                }
+            keyframes.append(frame)
+        
+        # Strike
+        for t in np.linspace(0.3, 0.5, 5):
+            frame = {'time': t, 'bones': {}}
+            for bone in skeleton['bones']:
+                if 'hand' in bone['name'] or 'head' in bone['name']:
+                    # Forward motion
+                    offset = (t - 0.3) * 0.5
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'] + [offset, 0, 0],
+                        'rotation': [0, 0, 0, 1]
+                    }
+                else:
+                    frame['bones'][bone['id']] = {
+                        'position': bone['position'],
+                        'rotation': [0, 0, 0, 1]
+                    }
+            keyframes.append(frame)
+        
+        # Return
+        for t in np.linspace(0.5, 1.0, 10):
+            frame = {'time': t, 'bones': {}}
+            for bone in skeleton['bones']:
+                frame['bones'][bone['id']] = {
+                    'position': bone['position'],
+                    'rotation': [0, 0, 0, 1]
+                }
+            keyframes.append(frame)
+        
+        return keyframes
+    
+    def _create_happy_animation(self, skeleton: Dict) -> List[Dict]:
+        """Create happy/excited animation keyframes"""
+        # Jumping or bouncing motion
+        keyframes = []
+        
+        for t in np.linspace(0, 2 * np.pi, 40):
+            frame = {
+                'time': t / (2 * np.pi),
+                'bones': {}
+            }
+            
+            # Bouncing motion along the vertical (z) axis
+            bounce = abs(np.sin(t * 2)) * 0.1
+            
+            for bone in skeleton['bones']:
+                frame['bones'][bone['id']] = {
+                    'position': bone['position'] + [0, 0, bounce],
+                    'rotation': [0, 0, 0, 1]
+                }
+            
+            keyframes.append(frame)
+        
+        return keyframes
+    
+    def _save_rigged_model(self, rigged_model: Dict) -> str:
+        """Save rigged model to file"""
+        
+        # Create temporary file
+        with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
+            output_path = tmp.name
+        
+        # In production, this would export the rigged model with animations
+        # For now, just save the mesh
+        rigged_model['mesh'].export(output_path)
+        
+        return output_path
+    
+    def _save_mesh_without_rigging(self, mesh: Union[str, trimesh.Trimesh]) -> str:
+        """Save mesh without rigging as fallback"""
+        
+        if isinstance(mesh, str):
+            return mesh
+        
+        with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
+            output_path = tmp.name
+        
+        mesh.export(output_path)
+        return output_path
+    
+    def to(self, device: str):
+        """Move model to specified device (compatibility method)"""
+        self.device = device
\ No newline at end of file
diff --git a/models/stt_processor.py b/models/stt_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac423dc1ad6fc7053c5949532c672f37079717ff
--- /dev/null
+++ b/models/stt_processor.py
@@ -0,0 +1,154 @@
+import torch
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
+import numpy as np
+from typing import Union
+import librosa
+
+class KyutaiSTTProcessor:
+    """Processor for Kyutai Speech-to-Text model"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.model = None
+        self.processor = None
+        self.model_id = "kyutai/stt-2.6b-en"  # English-only model for better accuracy
+        
+        # Audio processing parameters
+        self.sample_rate = 16000
+        self.chunk_length_s = 30  # Process in 30-second chunks
+        self.max_duration = 120  # Maximum 2 minutes of audio
+    
+    def load_model(self):
+        """Lazy load the STT model"""
+        if self.model is None:
+            try:
+                # Load processor and model
+                self.processor = AutoProcessor.from_pretrained(self.model_id)
+                
+                # Model configuration for low VRAM usage
+                torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+                
+                self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
+                    self.model_id,
+                    torch_dtype=torch_dtype,
+                    low_cpu_mem_usage=True,
+                    use_safetensors=True
+                )
+                
+                self.model.to(self.device)
+                
+                # Enable better generation settings
+                self.model.generation_config.language = "english"
+                self.model.generation_config.task = "transcribe"
+                self.model.generation_config.forced_decoder_ids = None
+                
+            except Exception as e:
+                print(f"Failed to load STT model: {e}")
+                raise
+    
+    def preprocess_audio(self, audio_path: str) -> np.ndarray:
+        """Preprocess audio file for transcription"""
+        try:
+            # Load audio file
+            audio, sr = librosa.load(audio_path, sr=None, mono=True)
+            
+            # Resample if necessary
+            if sr != self.sample_rate:
+                audio = librosa.resample(audio, orig_sr=sr, target_sr=self.sample_rate)
+            
+            # Limit duration
+            max_samples = self.max_duration * self.sample_rate
+            if len(audio) > max_samples:
+                audio = audio[:max_samples]
+            
+            # Normalize audio
+            audio = audio / (np.max(np.abs(audio)) + 1e-7)
+            
+            return audio
+            
+        except Exception as e:
+            print(f"Error preprocessing audio: {e}")
+            raise
+    
+    def transcribe(self, audio_input: Union[str, np.ndarray]) -> str:
+        """Transcribe audio to text"""
+        try:
+            # Load model if not already loaded
+            self.load_model()
+            
+            # Process audio input
+            if isinstance(audio_input, str):
+                audio = self.preprocess_audio(audio_input)
+            else:
+                audio = audio_input
+            
+            # Process with model
+            inputs = self.processor(
+                audio, 
+                sampling_rate=self.sample_rate, 
+                return_tensors="pt"
+            ).to(self.device)
+            
+            # Generate transcription
+            with torch.no_grad():
+                generated_ids = self.model.generate(
+                    inputs["input_features"],
+                    max_new_tokens=128,
+                    do_sample=False,
+                    num_beams=1  # Greedy decoding for speed
+                )
+            
+            # Decode transcription
+            transcription = self.processor.batch_decode(
+                generated_ids, 
+                skip_special_tokens=True,
+                clean_up_tokenization_spaces=True
+            )[0]
+            
+            # Clean up transcription
+            transcription = self._clean_transcription(transcription)
+            
+            return transcription
+            
+        except Exception as e:
+            print(f"Transcription error: {e}")
+            # Return a default description on error
+            return "Create a unique digital monster companion"
+    
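+    # Example usage (a minimal sketch; "voice_note.wav" is a hypothetical
+    # recording):
+    #
+    #     stt = KyutaiSTTProcessor(device="cpu")
+    #     text = stt.transcribe("voice_note.wav")
+    #     print(text)  # e.g. "Make me a small fire dragon."
+    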
+    def _clean_transcription(self, text: str) -> str:
+        """Clean up transcription output"""
+        # Remove extra whitespace
+        text = " ".join(text.split())
+        
+        # Ensure proper capitalization
+        if text and text[0].islower():
+            text = text[0].upper() + text[1:]
+        
+        # Add period if missing
+        if text and text[-1] not in '.!?':
+            text += '.'
+        
+        return text
+    
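+    # Example (comment only): _clean_transcription("  make a   dragon")
+    # returns "Make a dragon." - whitespace collapsed, first letter
+    # capitalized, and a terminal period added.
+    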
+    def transcribe_streaming(self, audio_stream):
+        """Streaming transcription (for future implementation)"""
+        # This would handle real-time audio streams
+        # For now, return placeholder
+        raise NotImplementedError("Streaming transcription not yet implemented")
+    
+    def to(self, device: str):
+        """Move model to specified device"""
+        self.device = device
+        if self.model:
+            self.model.to(device)
+    
+    def __del__(self):
+        """Cleanup when object is destroyed"""
+        if self.model:
+            del self.model
+        if self.processor:
+            del self.processor
+        torch.cuda.empty_cache()
\ No newline at end of file
diff --git a/models/text_generator.py b/models/text_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b519dc46b18a41e0b155e4c1f2dd20c43279916
--- /dev/null
+++ b/models/text_generator.py
@@ -0,0 +1,299 @@
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import random
+from typing import Dict, Any, List
+
+class QwenTextGenerator:
+    """Text generation using Qwen2.5-0.5B-Instruct for monster traits and dialogue"""
+    
+    def __init__(self, device: str = "cuda"):
+        self.device = device if torch.cuda.is_available() else "cpu"
+        self.model = None
+        self.tokenizer = None
+        self.model_id = "Qwen/Qwen2.5-0.5B-Instruct"
+        
+        # Generation parameters
+        self.max_new_tokens = 150
+        self.temperature = 0.8
+        self.top_p = 0.9
+        
+        # Monster trait templates
+        self.trait_categories = {
+            'elements': ['fire', 'water', 'earth', 'wind', 'electric', 'ice', 'nature', 'dark', 'light', 'neutral'],
+            'personalities': ['brave', 'timid', 'aggressive', 'gentle', 'playful', 'serious', 'loyal', 'independent', 'curious', 'protective'],
+            'body_types': ['bipedal', 'quadruped', 'serpentine', 'avian', 'aquatic', 'insectoid', 'humanoid', 'amorphous'],
+            'sizes': ['tiny', 'small', 'medium', 'large', 'giant'],
+            'special_features': ['wings', 'horns', 'tail', 'spikes', 'fur', 'scales', 'armor', 'crystals', 'flames', 'aura']
+        }
+    
+    def load_model(self):
+        """Lazy load the text generation model"""
+        if self.model is None:
+            try:
+                # Load tokenizer
+                self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
+                
+                # Model configuration
+                torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+                
+                self.model = AutoModelForCausalLM.from_pretrained(
+                    self.model_id,
+                    torch_dtype=torch_dtype,
+                    device_map="auto" if self.device == "cuda" else None,
+                    low_cpu_mem_usage=True
+                )
+                
+                if self.device == "cpu":
+                    self.model.to(self.device)
+                
+            except Exception as e:
+                print(f"Failed to load text generation model: {e}")
+                raise
+    
+    def generate_traits(self, description: str) -> Dict[str, Any]:
+        """Generate monster traits from description"""
+        try:
+            self.load_model()
+            
+            # Create prompt for trait generation
+            prompt = self._create_trait_prompt(description)
+            
+            # Generate response
+            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+            
+            with torch.no_grad():
+                outputs = self.model.generate(
+                    **inputs,
+                    max_new_tokens=self.max_new_tokens,
+                    temperature=self.temperature,
+                    top_p=self.top_p,
+                    do_sample=True,
+                    pad_token_id=self.tokenizer.eos_token_id
+                )
+            
+            response = self.tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
+            
+            # Parse traits from response
+            traits = self._parse_traits(response, description)
+            
+            return traits
+            
+        except Exception as e:
+            print(f"Error generating traits: {e}")
+            return self._generate_fallback_traits(description)
+    
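+    # Example usage (a minimal sketch):
+    #
+    #     gen = QwenTextGenerator(device="cpu")
+    #     traits = gen.generate_traits("a tiny ice dragon with crystal wings")
+    #     # traits includes at least: name, element, personality,
+    #     # appearance, abilities, color_scheme
+    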
+    def generate_dialogue(self, traits: Dict[str, Any]) -> str:
+        """Generate monster dialogue (emoji + numbers)"""
+        try:
+            # Create emoji dialogue based on personality and mood
+            personality = traits.get('personality', 'neutral')
+            
+            # Emoji mapping for personalities
+            emoji_map = {
+                'brave': ['💪', '🔥', '⚔️', '🛡️'],
+                'timid': ['😰', '🥺', '💦', '❓'],
+                'aggressive': ['😤', '💢', '🔥', '⚡'],
+                'gentle': ['💚', '🌸', '✨', '🌟'],
+                'playful': ['😊', '🎮', '🎯', '🎪'],
+                'serious': ['🤖', '📊', '⚡', '💯'],
+                'loyal': ['💖', '🤝', '🛡️', '⭐'],
+                'independent': ['🚀', '🌍', '🔮', '💫'],
+                'curious': ['🔍', '❓', '💡', '🌟'],
+                'protective': ['🛡️', '💪', '🏰', '⚔️']
+            }
+            
+            # Get appropriate emojis
+            emojis = emoji_map.get(personality, ['🤖', '💚', '✨'])
+            selected_emojis = random.sample(emojis, min(2, len(emojis)))
+            
+            # Generate status numbers (representing monster's current state)
+            hp_percent = random.randint(70, 100)
+            happiness = random.randint(60, 95)
+            energy = random.randint(50, 90)
+            
+            # Create dialogue; render each status digit as its own keycap
+            # emoji (a single combining keycap after a multi-digit number
+            # would only attach to the last digit)
+            dialogue = f"{selected_emojis[0]}{selected_emojis[1] if len(selected_emojis) > 1 else '💚'}"
+            dialogue += ''.join(f"{d}\ufe0f\u20e3" for d in f"{hp_percent}{happiness}")
+            
+            return dialogue
+            
+        except Exception as e:
+            print(f"Error generating dialogue: {e}")
+            return "🤖💚9️⃣0️⃣"
+    
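+    # Example output (comment only): a 'brave' monster with hp_percent=85
+    # and happiness=70 might yield "💪🔥" followed by the keycap digits
+    # 8️⃣5️⃣7️⃣0️⃣.
+    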
+    def _create_trait_prompt(self, description: str) -> str:
+        """Create prompt for trait generation"""
+        prompt = f"""<|im_start|>system
+You are a creative game designer creating unique digital monsters. Generate detailed traits for a monster based on the description.
+<|im_end|>
+<|im_start|>user
+Create traits for this monster: {description}
+
+Include: name, species, element, personality, appearance details, and special abilities.
+<|im_end|>
+<|im_start|>assistant
+"""
+        return prompt
+    
+    def _parse_traits(self, response: str, original_description: str) -> Dict[str, Any]:
+        """Parse traits from model response"""
+        traits = {
+            'description': original_description,
+            'raw_response': response
+        }
+        
+        # Extract name
+        if "name:" in response.lower():
+            name_start = response.lower().find("name:") + 5
+            name_end = response.find("\n", name_start)
+            if name_end == -1:
+                name_end = len(response)
+            traits['name'] = response[name_start:name_end].strip()
+        else:
+            traits['name'] = self._generate_name()
+        
+        # Extract or assign element
+        element_found = False
+        for element in self.trait_categories['elements']:
+            if element in response.lower():
+                traits['element'] = element
+                element_found = True
+                break
+        
+        if not element_found:
+            traits['element'] = random.choice(self.trait_categories['elements'])
+        
+        # Extract or assign personality
+        personality_found = False
+        for personality in self.trait_categories['personalities']:
+            if personality in response.lower():
+                traits['personality'] = personality
+                personality_found = True
+                break
+        
+        if not personality_found:
+            traits['personality'] = random.choice(self.trait_categories['personalities'])
+        
+        # Extract appearance
+        traits['appearance'] = self._extract_appearance(response)
+        
+        # Extract abilities
+        traits['abilities'] = self._extract_abilities(response, traits['element'])
+        
+        # Add color scheme based on element
+        traits['color_scheme'] = self._get_color_scheme(traits['element'])
+        
+        return traits
+    
+    def _generate_name(self) -> str:
+        """Generate a random monster name"""
+        prefixes = ['Pyro', 'Aqua', 'Terra', 'Aero', 'Volt', 'Cryo', 'Flora', 'Shadow', 'Lumi', 'Neo']
+        suffixes = ['mon', 'beast', 'guard', 'wing', 'claw', 'fang', 'horn', 'tail', 'byte', 'spark']
+        
+        return random.choice(prefixes) + random.choice(suffixes)
+    
+    def _extract_appearance(self, response: str) -> str:
+        """Extract appearance description"""
+        appearance_keywords = ['appearance', 'looks like', 'resembles', 'body', 'color', 'size']
+        
+        for keyword in appearance_keywords:
+            if keyword in response.lower():
+                start = response.lower().find(keyword)
+                end = response.find('.', start)
+                if end == -1:
+                    end = response.find('\n', start)
+                if end == -1:
+                    end = len(response)
+                
+                return response[start:end].strip()
+        
+        # Fallback appearance
+        body_type = random.choice(self.trait_categories['body_types'])
+        size = random.choice(self.trait_categories['sizes'])
+        feature = random.choice(self.trait_categories['special_features'])
+        
+        return f"A {size} {body_type} creature with {feature}"
+    
+    def _extract_abilities(self, response: str, element: str) -> List[str]:
+        """Extract or generate abilities"""
+        abilities = []
+        
+        ability_keywords = ['ability', 'power', 'skill', 'can', 'capable']
+        for keyword in ability_keywords:
+            if keyword in response.lower():
+                # Try to extract abilities from response
+                start = response.lower().find(keyword)
+                end = response.find('.', start)
+                if end > start:
+                    ability_text = response[start:end]
+                    abilities.append(ability_text.strip())
+        
+        # If no abilities found, generate based on element
+        if not abilities:
+            element_abilities = {
+                'fire': ['Flame Burst', 'Heat Wave', 'Ember Shield'],
+                'water': ['Aqua Jet', 'Bubble Shield', 'Tidal Wave'],
+                'earth': ['Rock Throw', 'Earthquake', 'Stone Armor'],
+                'wind': ['Gust', 'Tornado', 'Wind Shield'],
+                'electric': ['Thunder Shock', 'Static Field', 'Lightning Speed'],
+                'ice': ['Ice Beam', 'Frost Armor', 'Blizzard'],
+                'nature': ['Vine Whip', 'Healing Bloom', 'Nature\'s Guard'],
+                'dark': ['Shadow Strike', 'Dark Pulse', 'Void Shield'],
+                'light': ['Light Beam', 'Healing Light', 'Radiant Shield'],
+                'neutral': ['Tackle', 'Defense Curl', 'Focus']
+            }
+            
+            abilities = random.sample(
+                element_abilities.get(element, element_abilities['neutral']), 
+                2
+            )
+        
+        return abilities
+    
+    def _get_color_scheme(self, element: str) -> str:
+        """Get color scheme based on element"""
+        color_schemes = {
+            'fire': 'red and orange with yellow accents',
+            'water': 'blue and cyan with white highlights',
+            'earth': 'brown and green with stone textures',
+            'wind': 'white and light blue with swirling patterns',
+            'electric': 'yellow and blue with sparking effects',
+            'ice': 'light blue and white with crystalline features',
+            'nature': 'green and brown with leaf patterns',
+            'dark': 'black and purple with shadow effects',
+            'light': 'white and gold with glowing aura',
+            'neutral': 'gray and silver with balanced tones'
+        }
+        
+        return color_schemes.get(element, 'varied colors with unique patterns')
+    
+    def _generate_fallback_traits(self, description: str) -> Dict[str, Any]:
+        """Generate fallback traits if model fails"""
+        element = random.choice(self.trait_categories['elements'])
+        personality = random.choice(self.trait_categories['personalities'])
+        
+        return {
+            'name': self._generate_name(),
+            'species': 'Digital Monster',
+            'element': element,
+            'personality': personality,
+            'appearance': f"A unique {random.choice(self.trait_categories['sizes'])} digital creature",
+            'color_scheme': self._get_color_scheme(element),
+            'abilities': self._extract_abilities("", element),
+            'description': description
+        }
+    
+    def to(self, device: str):
+        """Move model to specified device"""
+        self.device = device
+        if self.model:
+            self.model.to(device)
+    
+    def __del__(self):
+        """Cleanup when object is destroyed"""
+        if self.model:
+            del self.model
+        if self.tokenizer:
+            del self.tokenizer
+        torch.cuda.empty_cache()
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index c96e5f441e3e1c1f0641134d75266d2c0f860dd0..790879ad5f04ec7a3d4558476efeb562186b6b0a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,57 +1,48 @@
-# Core ML Framework - Latest optimized versions
-transformers>=4.52.4  # Latest stable, supports Qwen 2.5
-torch>=2.2.0  # PyTorch 2.0+ for torch.compile
-torchaudio>=2.2.0
-diffusers>=0.30.0  # For OmniGen and other diffusion models
-# gradio>=5.34.2  # Replaced with Streamlit
-
-# Qwen 2.5 Optimization Stack
-# auto-gptq>=0.7.1  # Removed - not needed, using BitsAndBytesConfig instead
-optimum>=1.16.0
-accelerate>=0.26.1
-bitsandbytes>=0.42.0
-# FlashAttention2 will be installed at runtime if GPU is available
-
-# Enhanced Audio Processing - Kyutai STT
+# Core dependencies
+gradio>=4.16.0
+spaces>=0.19.0
+
+# AI/ML frameworks
+torch>=2.1.0
+torchvision>=0.16.0
+torchaudio>=2.1.0
+transformers>=4.36.0
+diffusers>=0.24.0
+accelerate>=0.25.0
+bitsandbytes>=0.41.0
+
+# Model-specific dependencies
+huggingface-hub>=0.20.0
+safetensors>=0.4.1
+sentencepiece>=0.1.99
+tokenizers>=0.15.0
+
+# Audio processing
+librosa>=0.10.1
 soundfile>=0.12.1
-webrtcvad>=2.0.10
-# Note: transformers and torch/torchaudio above provide Kyutai STT support
 
-# Production Backend
-fastapi>=0.108.0
-uvicorn[standard]>=0.25.0
-pydantic>=2.5.0
-websockets>=12.0
-streamlit>=1.28.0  # Modern UI framework replacing Gradio
+# Image processing
+Pillow>=10.0.0
+opencv-python>=4.8.0
+rembg>=2.0.50
 
-# Advanced State Management
-apscheduler>=3.10.4
-aiosqlite>=0.19.0
+# 3D processing
+trimesh>=4.0.0
+numpy-stl>=3.1.1
+pygltflib>=1.16.1
 
-# Zero GPU Optimization (kept for speech engine compatibility)
-spaces>=0.28.0
-
-# 3D Generation Pipeline Dependencies
-gradio_client>=0.8.0  # For Hunyuan3D Space API integration
-trimesh>=4.0.0  # 3D mesh processing
-aiohttp>=3.9.0  # Async HTTP for API calls
-
-# Core Utilities
+# Scientific computing
 numpy>=1.24.0
-pandas>=2.1.0
-pillow>=10.1.0
-python-dateutil>=2.8.2
-emoji>=2.8.0
-psutil>=5.9.0
-
-# Async Support
-aiofiles>=23.2.0
-asyncio-mqtt>=0.16.1
-
-# Scientific Computing
 scipy>=1.11.0
 scikit-learn>=1.3.0
 
-# Development Tools
-pytest>=7.4.0
-black>=23.0.0
\ No newline at end of file
+# Utilities
+python-dateutil>=2.8.2
+tqdm>=4.66.0
+pyyaml>=6.0.1
+requests>=2.31.0
+aiofiles>=23.2.1
+
+# Optional optimizations
+# onnxruntime-gpu>=1.16.0  # For ONNX model support
+# xformers>=0.0.23  # For memory-efficient attention
\ No newline at end of file
diff --git a/run_digipal.py b/run_digipal.py
deleted file mode 100755
index 5be5d8ad4e7ca231217887f648be1754a4cf2ab0..0000000000000000000000000000000000000000
--- a/run_digipal.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python3
-"""
-DigiPal Launcher Script
-Starts both FastAPI backend and Streamlit frontend
-"""
-
-import subprocess
-import time
-import sys
-import os
-import threading
-import logging
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-def start_fastapi():
-    """Start FastAPI backend server"""
-    logger.info("Starting FastAPI backend server...")
-    try:
-        subprocess.run([sys.executable, "app.py"], check=True)
-    except subprocess.CalledProcessError as e:
-        logger.error(f"FastAPI server failed: {e}")
-    except KeyboardInterrupt:
-        logger.info("FastAPI server stopped")
-
-def start_streamlit():
-    """Start Streamlit frontend"""
-    logger.info("Starting Streamlit frontend...")
-    try:
-        port = os.getenv("STREAMLIT_PORT", "8501")
-        subprocess.run([
-            sys.executable, "-m", "streamlit", "run", 
-            "src/ui/streamlit_interface.py",
-            "--server.port", port,
-            "--server.address", "0.0.0.0"
-        ], check=True)
-    except subprocess.CalledProcessError as e:
-        logger.error(f"Streamlit frontend failed: {e}")
-    except KeyboardInterrupt:
-        logger.info("Streamlit frontend stopped")
-
-def main():
-    """Main launcher function"""
-    logger.info("🐉 DigiPal - Advanced AI Monster Companion")
-    logger.info("=" * 60)
-    logger.info("Starting both FastAPI backend and Streamlit frontend...")
-    api_port = os.getenv("API_PORT", "7861")
-    streamlit_port = os.getenv("STREAMLIT_PORT", "8501")
-    logger.info(f"FastAPI Backend: http://localhost:{api_port}")
-    logger.info(f"Streamlit Frontend: http://localhost:{streamlit_port}")
-    logger.info("=" * 60)
-    
-    # Create necessary directories
-    os.makedirs("data/saves", exist_ok=True)
-    os.makedirs("data/models", exist_ok=True)
-    os.makedirs("data/cache", exist_ok=True)
-    os.makedirs("logs", exist_ok=True)
-    
-    try:
-        # Start FastAPI in a separate thread
-        fastapi_thread = threading.Thread(target=start_fastapi, daemon=True)
-        fastapi_thread.start()
-        
-        # Give FastAPI time to start
-        time.sleep(3)
-        
-        # Start Streamlit (this will block)
-        start_streamlit()
-        
-    except KeyboardInterrupt:
-        logger.info("Shutting down DigiPal...")
-        sys.exit(0)
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/src/ai/__init__.py b/src/ai/__init__.py
deleted file mode 100644
index 4bb5022d6118211e0309cf3112ec6300c227045d..0000000000000000000000000000000000000000
--- a/src/ai/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# AI module initialization
\ No newline at end of file
diff --git a/src/ai/qwen_processor.py b/src/ai/qwen_processor.py
deleted file mode 100644
index f60c5d025017c1e55fe1a08f2f8d76cc6cfeac4b..0000000000000000000000000000000000000000
--- a/src/ai/qwen_processor.py
+++ /dev/null
@@ -1,621 +0,0 @@
-import torch
-from transformers import (
-    AutoModelForCausalLM, 
-    AutoTokenizer, 
-    pipeline,
-    BitsAndBytesConfig
-)
-# GPTQConfig is no longer needed - we'll use BitsAndBytesConfig for quantization
-import asyncio
-import logging
-from typing import Dict, List, Optional, Any
-import json
-import time
-from dataclasses import dataclass
-import spaces
-from datetime import datetime
-
-# Check for optional FlashAttention2 availability
-try:
-    import flash_attn
-    FLASH_ATTN_AVAILABLE = True
-except ImportError:
-    FLASH_ATTN_AVAILABLE = False
-
-@dataclass
-class ModelConfig:
-    model_name: str
-    max_memory_gb: float
-    inference_speed: str  # "fast", "balanced", "quality"
-    use_quantization: bool = True
-    use_flash_attention: bool = True
-
-class QwenProcessor:
-    def __init__(self, config: ModelConfig):
-        self.config = config
-        self.logger = logging.getLogger(__name__)
-        
-        # Model configurations for different performance tiers
-        self.model_configs = {
-            "fast": {
-                "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
-                "torch_dtype": torch.bfloat16,
-                "device_map": "auto",
-                "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
-                "max_memory_gb": 4
-            },
-            "balanced": {
-                "model_name": "Qwen/Qwen2.5-3B-Instruct",
-                "torch_dtype": torch.bfloat16,
-                "device_map": "auto", 
-                "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
-                "max_memory_gb": 8
-            },
-            "quality": {
-                "model_name": "Qwen/Qwen2.5-7B-Instruct",
-                "torch_dtype": torch.bfloat16,
-                "device_map": "auto",
-                "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
-                "max_memory_gb": 16
-            }
-        }
-        
-        self.model = None
-        self.tokenizer = None
-        self.pipeline = None
-        self.conversation_cache = {}
-        
-        # Performance tracking
-        self.inference_times = []
-        self.memory_usage = []
-        
-    def _try_install_flash_attention(self) -> bool:
-        """Try to install FlashAttention2 at runtime if GPU is available"""
-        global FLASH_ATTN_AVAILABLE
-        
-        # If already available, no need to install
-        if FLASH_ATTN_AVAILABLE:
-            return True
-            
-        # Only attempt installation if GPU is available
-        if not torch.cuda.is_available():
-            self.logger.info("GPU not available, skipping FlashAttention2 installation")
-            return False
-            
-        try:
-            self.logger.info("Attempting to install FlashAttention2 at runtime...")
-            
-            # Try simple pip install first
-            import subprocess
-            import sys
-            
-            # Try installing with different approaches
-            commands = [
-                # Try with pre-compiled wheel
-                [sys.executable, "-m", "pip", "install", "flash-attn", "--prefer-binary", "--no-build-isolation"],
-                # Fallback to regular installation
-                [sys.executable, "-m", "pip", "install", "flash-attn", "--no-deps"],
-            ]
-            
-            for cmd in commands:
-                try:
-                    result = subprocess.run(cmd, capture_output=True, text=True, timeout=180)
-                    if result.returncode == 0:
-                        # Try importing to verify installation
-                        try:
-                            import flash_attn
-                            FLASH_ATTN_AVAILABLE = True
-                            self.logger.info("FlashAttention2 installed successfully!")
-                            return True
-                        except ImportError:
-                            continue
-                    else:
-                        self.logger.debug(f"Installation attempt failed: {result.stderr}")
-                        continue
-                except subprocess.TimeoutExpired:
-                    self.logger.warning("FlashAttention2 installation timed out")
-                    continue
-                except Exception as e:
-                    self.logger.debug(f"Installation attempt error: {e}")
-                    continue
-                    
-            self.logger.info("FlashAttention2 installation failed, will use default attention mechanism")
-            
-        except Exception as e:
-            self.logger.warning(f"Could not attempt FlashAttention2 installation: {e}")
-            
-        return False
-
-    async def initialize(self):
-        """Initialize the Qwen 2.5 model with optimizations"""
-        try:
-            model_config = self.model_configs[self.config.inference_speed]
-            
-            # Enhanced device detection for local vs Spaces environments
-            is_cpu_only = not torch.cuda.is_available()
-            is_spaces_gpu = False
-            
-            # Check if we're in Spaces with GPU
-            if torch.cuda.is_available():
-                try:
-                    # Test if we can actually use GPU
-                    torch.cuda.current_device()
-                    torch.cuda.empty_cache()
-                    is_spaces_gpu = True
-                    is_cpu_only = False
-                    self.logger.info("GPU detected and accessible - using GPU acceleration")
-                    
-                    # Try to install FlashAttention2 at runtime if not available
-                    if not FLASH_ATTN_AVAILABLE:
-                        self._try_install_flash_attention()
-                        
-                except Exception as e:
-                    self.logger.warning(f"GPU detected but not accessible: {e} - falling back to CPU")
-                    is_cpu_only = True
-                    is_spaces_gpu = False
-            else:
-                self.logger.info("No GPU detected - using CPU only")
-            
-            # Quantization configuration - optimize based on environment
-            if self.config.use_quantization and not is_cpu_only:
-                try:
-                    quantization_config = BitsAndBytesConfig(
-                        load_in_4bit=True,
-                        bnb_4bit_compute_dtype=torch.bfloat16,
-                        bnb_4bit_use_double_quant=True,
-                        bnb_4bit_quant_type="nf4"
-                    )
-                    self.logger.info("4-bit quantization enabled for GPU")
-                except Exception as e:
-                    self.logger.warning(f"Quantization failed, falling back to full precision: {e}")
-                    quantization_config = None
-            else:
-                quantization_config = None
-                if is_cpu_only:
-                    self.logger.info("CPU-only environment detected, disabling quantization")
-            
-            # Load tokenizer
-            self.tokenizer = AutoTokenizer.from_pretrained(
-                model_config["model_name"],
-                trust_remote_code=True,
-                use_fast=True
-            )
-            
-            # Adjust model configuration based on environment
-            model_kwargs = {
-                "trust_remote_code": True,
-                "use_cache": True,
-                "low_cpu_mem_usage": True
-            }
-            
-            if is_cpu_only:
-                # CPU-only optimizations
-                model_kwargs.update({
-                    "torch_dtype": torch.float32,  # Use float32 for CPU compatibility
-                    "device_map": "cpu"
-                })
-                self.logger.info("Loading model for CPU-only environment")
-            else:
-                # GPU optimizations - use FlashAttention2 when available
-                use_flash_attention = (self.config.use_flash_attention and 
-                                     is_spaces_gpu and 
-                                     FLASH_ATTN_AVAILABLE)
-                
-                if use_flash_attention:
-                    attn_implementation = model_config["attn_implementation"]
-                    self.logger.info("Loading model for GPU environment with FlashAttention2")
-                else:
-                    attn_implementation = None  # Use default attention
-                    if self.config.use_flash_attention and not FLASH_ATTN_AVAILABLE:
-                        self.logger.info("FlashAttention2 requested but not available, using default attention")
-                    else:
-                        self.logger.info("Loading model for GPU environment with default attention")
-                
-                model_kwargs.update({
-                    "torch_dtype": model_config["torch_dtype"],
-                    "device_map": model_config["device_map"],
-                    "attn_implementation": attn_implementation
-                })
-            
-            if quantization_config is not None:
-                model_kwargs["quantization_config"] = quantization_config
-            
-            # Load model with optimizations
-            import os
-            if os.getenv("SPACE_ID") and not is_cpu_only:
-                # Use GPU wrapper for ZeroGPU compatibility
-                self.model = gpu_model_initialization(
-                    AutoModelForCausalLM,
-                    model_config["model_name"],
-                    **model_kwargs
-                )
-            else:
-                # Direct model loading for local environments
-                self.model = AutoModelForCausalLM.from_pretrained(
-                    model_config["model_name"],
-                    **model_kwargs
-                )
-            
-            # Compile model for faster inference (PyTorch 2.0+) - only on GPU
-            if hasattr(torch, "compile") and not is_cpu_only:
-                try:
-                    self.model = torch.compile(self.model, mode="reduce-overhead")
-                    self.logger.info("Model compiled with torch.compile for faster inference")
-                except Exception as e:
-                    self.logger.warning(f"Model compilation failed: {e}")
-            
-            # Create pipeline with appropriate device mapping
-            pipeline_kwargs = {
-                "task": "text-generation",
-                "model": self.model,
-                "tokenizer": self.tokenizer,
-                "batch_size": 1,
-                "return_full_text": False
-            }
-            
-            if is_cpu_only:
-                pipeline_kwargs["device"] = -1  # CPU device for pipeline
-            # Do not pass device_map when model is already loaded with accelerate
-            
-            self.pipeline = pipeline(**pipeline_kwargs)
-            
-            self.logger.info(f"Qwen 2.5 model initialized: {model_config['model_name']} ({'GPU' if not is_cpu_only else 'CPU'})")
-            
-        except Exception as e:
-            self.logger.error(f"Failed to initialize Qwen model: {e}")
-            raise
-    
-    async def generate_monster_response(self, 
-                                      monster_data: Dict[str, Any],
-                                      user_input: str,
-                                      conversation_history: List[Dict[str, str]] = None) -> Dict[str, Any]:
-        """Generate contextual response based on monster personality and state"""
-        start_time = time.time()
-        
-        if conversation_history is None:
-            conversation_history = []
-        
-        try:
-            # Build context from monster data
-            context = self._build_context(monster_data, conversation_history)
-            
-            # Generate appropriate prompt based on monster state
-            prompt = self._generate_prompt(context, user_input)
-            
-            # Configure generation parameters based on monster personality
-            generation_params = self._get_generation_params(monster_data)
-            
-            # Generate response using the pipeline
-            response = await self._generate_response(prompt, generation_params)
-            
-            # Post-process response based on monster personality
-            processed_response = self._post_process_response(
-                response, 
-                monster_data
-            )
-            
-            # Calculate emotional impact
-            emotional_impact = self._calculate_emotional_impact(
-                user_input,
-                processed_response,
-                monster_data
-            )
-            
-            # Track memory usage
-            if torch.cuda.is_available():
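-                # max_memory_allocated() reports peak bytes on the current CUDA device; divide converts to GiB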
-                memory_used = torch.cuda.max_memory_allocated() / 1024**3
-                self.memory_usage.append({
-                    "timestamp": datetime.now().isoformat(),
-                    "memory_gb": memory_used
-                })
-            
-            inference_time = time.time() - start_time
-            
-            return {
-                "response": processed_response,
-                "emotional_impact": emotional_impact,
-                "inference_time": inference_time,
-                "model_used": self.config.model_name,
-                "context_length": len(prompt)
-            }
-            
-        except Exception as e:
-            self.logger.error(f"Response generation failed: {e}")
-            # Return a fallback response
-            return {
-                "response": self._get_fallback_response(monster_data),
-                "emotional_impact": {"neutral": 1.0},
-                "inference_time": 0.0,
-                "error": str(e)
-            }
-    
-    def _build_personality_prompt(self, monster_data: Dict[str, Any]) -> str:
-        """Build personality description for the monster"""
-        personality = monster_data.get('personality', {})
-        
-        # Core personality traits
-        primary_type = personality.get('primary_type', 'playful')
-        traits = []
-        
-        # Big Five personality factors
-        if personality.get('extraversion', 0.5) > 0.7:
-            traits.append("very outgoing and social")
-        elif personality.get('extraversion', 0.5) < 0.3:
-            traits.append("more reserved and introspective")
-        
-        if personality.get('agreeableness', 0.5) > 0.7:
-            traits.append("extremely friendly and cooperative")
-        elif personality.get('agreeableness', 0.5) < 0.3:
-            traits.append("more independent and sometimes stubborn")
-        
-        if personality.get('conscientiousness', 0.5) > 0.7:
-            traits.append("very disciplined and organized")
-        elif personality.get('conscientiousness', 0.5) < 0.3:
-            traits.append("more spontaneous and carefree")
-        
-        if personality.get('openness', 0.5) > 0.7:
-            traits.append("very curious and imaginative")
-        elif personality.get('openness', 0.5) < 0.3:
-            traits.append("more practical and traditional")
-        
-        # Learned preferences
-        favorites = personality.get('favorite_foods', [])
-        dislikes = personality.get('disliked_foods', [])
-        
-        personality_text = f"Personality Type: {primary_type.title()}\n"
-        
-        if traits:
-            personality_text += f"You are {', '.join(traits)}.\n"
-        
-        if favorites:
-            personality_text += f"Your favorite foods are: {', '.join(favorites[:3])}.\n"
-        
-        if dislikes:
-            personality_text += f"You dislike: {', '.join(dislikes[:3])}.\n"
-        
-        # Relationship context
-        relationship_level = personality.get('relationship_level', 0)
-        if relationship_level > 80:
-            personality_text += "You have a very strong bond with your caretaker.\n"
-        elif relationship_level > 50:
-            personality_text += "You trust and like your caretaker.\n"
-        elif relationship_level > 20:
-            personality_text += "You're getting to know your caretaker.\n"
-        else:
-            personality_text += "You're still warming up to your caretaker.\n"
-        
-        return personality_text
-    
-    def _build_conversation_context(self, 
-                                  history: List[Dict[str, str]], 
-                                  monster_data: Dict[str, Any]) -> str:
-        """Build conversation context from recent history"""
-        if not history:
-            return "This is your first conversation together."
-        
-        # Get recent messages (last 3 user/assistant exchanges = 6 messages)
-        recent_history = history[-6:]  # slicing already handles shorter histories
-        
-        context = "Recent conversation:\n"
-        for msg in recent_history:
-            if msg.get('role') == 'user':
-                context += f"Human: {msg.get('content', '')}\n"
-            else:
-                context += f"You: {msg.get('content', '')}\n"
-        
-        return context
-    
-    def _post_process_response(self, response: str, monster_data: Dict[str, Any]) -> str:
-        """Post-process the generated response"""
-        # Remove any unwanted prefixes/suffixes
-        response = response.strip()
-        
-        # Remove common artifacts
-        unwanted_prefixes = ["Assistant:", "Monster:", "DigiPal:", monster_data['name'] + ":"]
-        for prefix in unwanted_prefixes:
-            if response.startswith(prefix):
-                response = response[len(prefix):].strip()
-        
-        # Trim to at most two sentences
-        sentences = response.split('.')
-        if len(sentences) > 2:
-            response = '.'.join(sentences[:2]) + '.'  # split() kept each sentence's leading space, so '. ' separators are preserved
-        
-        # Add emojis if missing
-        if not self._has_emoji(response):
-            response = self._add_contextual_emoji(response, monster_data)
-        
-        return response
-    
-    def _has_emoji(self, text: str) -> bool:
-        """Check if text contains emojis"""
-        import emoji
-        return bool(emoji.emoji_count(text))
-    
-    def _add_contextual_emoji(self, response: str, monster_data: Dict[str, Any]) -> str:
-        """Add appropriate emoji based on context"""
-        emotional_state = monster_data.get('emotional_state', 'neutral')
-        
-        emoji_map = {
-            'ecstatic': ' 🤩',
-            'happy': ' 😊',
-            'content': ' 😌',
-            'neutral': ' 🙂',
-            'melancholy': ' 😔',
-            'sad': ' 😢',
-            'angry': ' 😠',
-            'sick': ' 🤒',
-            'excited': ' 😆',
-            'tired': ' 😴'
-        }
-        
-        return response + emoji_map.get(emotional_state, ' 🙂')
-    
-    def _analyze_emotional_impact(self, user_input: str, response: str) -> Dict[str, float]:
-        """Analyze the emotional impact of the interaction"""
-        # Simple keyword-based analysis (can be enhanced with sentiment models)
-        positive_keywords = ['love', 'good', 'great', 'amazing', 'wonderful', 'happy', 'fun']
-        negative_keywords = ['bad', 'sad', 'angry', 'hate', 'terrible', 'awful', 'sick']
-        
-        user_input_lower = user_input.lower()
-        
-        impact = {
-            'happiness': 0.0,
-            'stress': 0.0,
-            'bonding': 0.0
-        }
-        
-        # Analyze user input sentiment
-        for keyword in positive_keywords:
-            if keyword in user_input_lower:
-                impact['happiness'] += 0.1
-                impact['bonding'] += 0.05
-        
-        for keyword in negative_keywords:
-            if keyword in user_input_lower:
-                impact['happiness'] -= 0.1
-                impact['stress'] += 0.1
-        
-        # Base interaction bonus
-        impact['bonding'] += 0.02  # Small bonding increase for any interaction
-        
-        return impact
-    
-    def _get_fallback_response(self, monster_data: Dict[str, Any]) -> str:
-        """Get fallback response when AI generation fails"""
-        fallback_responses = [
-            f"*{monster_data['name']} looks at you curiously* 🤔",
-            f"*{monster_data['name']} makes a happy sound* 😊",
-            f"*{monster_data['name']} tilts head thoughtfully* 💭",
-            f"*{monster_data['name']} seems interested* 👀"
-        ]
-        
-        import random
-        return random.choice(fallback_responses)
-    
-    def get_performance_stats(self) -> Dict[str, Any]:
-        """Get model performance statistics"""
-        if not self.inference_times:
-            return {"status": "No inference data available"}
-        
-        avg_time = sum(self.inference_times) / len(self.inference_times)
-        
-        return {
-            "average_inference_time": avg_time,
-            "total_inferences": len(self.inference_times),
-            "fastest_inference": min(self.inference_times),
-            "slowest_inference": max(self.inference_times),
-            "tokens_per_second": 128 / avg_time,  # Approximate
-            "model_config": self.config.__dict__
-        }
-
-    def _build_context(self, monster_data: Dict[str, Any], conversation_history: List[Dict[str, str]]) -> str:
-        """Build complete context from monster data and conversation history"""
-        personality_prompt = self._build_personality_prompt(monster_data)
-        conversation_context = self._build_conversation_context(conversation_history, monster_data)
-        
-        context = f"""You are {monster_data['name']}, a virtual monster companion.
-
-{personality_prompt}
-
-Current State:
-- Health: {monster_data['stats']['health']}/100
-- Happiness: {monster_data['stats']['happiness']}/100
-- Energy: {monster_data['stats']['energy']}/100
-- Emotional State: {monster_data['emotional_state']}
-- Activity: {monster_data['current_activity']}
-
-Instructions:
-- Respond as this specific monster with this personality
-- Keep responses to 1-2 sentences maximum
-- Include 1-2 relevant emojis
-- Show personality through word choice and tone
-- React appropriately to your current stats and emotional state
-- Remember past conversations and build on them
-
-{conversation_context}"""
-        
-        return context
-    
-    def _generate_prompt(self, context: str, user_input: str) -> str:
-        """Generate the final prompt for the model"""
-        messages = [
-            {"role": "system", "content": context},
-            {"role": "user", "content": user_input}
-        ]
-        
-        # Render messages with the tokenizer's chat template (ChatML-style for Qwen 2.5)
-        prompt = self.tokenizer.apply_chat_template(
-            messages,
-            tokenize=False,
-            add_generation_prompt=True
-        )
-        
-        return prompt
-    
-    def _get_generation_params(self, monster_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Get generation parameters based on monster personality"""
-        personality_type = monster_data.get("personality", {}).get("primary_type", "playful")  # same key as _build_personality_prompt
-        
-        # Base parameters
-        params = {
-            "max_new_tokens": 128,
-            "temperature": 0.8,
-            "top_p": 0.9,
-            "top_k": 50,
-            "do_sample": True,
-            "pad_token_id": self.tokenizer.eos_token_id,
-            "repetition_penalty": 1.1,
-            "no_repeat_ngram_size": 3
-        }
-        
-        # Adjust based on personality
-        if personality_type == "energetic":
-            params["temperature"] = 0.9
-            params["top_p"] = 0.95
-        elif personality_type == "wise":
-            params["temperature"] = 0.7
-            params["top_p"] = 0.85
-        elif personality_type == "mysterious":
-            params["temperature"] = 0.85
-            params["top_k"] = 40
-        
-        return params
-    
-    async def _generate_response(self, prompt: str, generation_params: Dict[str, Any]) -> str:
-        """Generate response using the pipeline"""
-        try:
-            # Check if we're in Spaces environment and GPU is available
-            import os
-            if os.getenv("SPACE_ID") and torch.cuda.is_available():
-                # Use GPU wrapper function for ZeroGPU compatibility
-                response_text = gpu_generate_response(self, prompt, generation_params)
-            else:
-                # Direct pipeline call for local/CPU environments
-                outputs = self.pipeline(prompt, **generation_params)
-                response_text = outputs[0]["generated_text"].strip()
-            return response_text
-        except Exception as e:
-            self.logger.error(f"Pipeline generation failed: {e}")
-            raise
-    
-    def _calculate_emotional_impact(self, user_input: str, response: str, monster_data: Dict[str, Any]) -> Dict[str, float]:
-        """Calculate the emotional impact of the interaction"""
-        return self._analyze_emotional_impact(user_input, response)
-
-# ZeroGPU wrapper functions
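-# spaces.GPU reserves a ZeroGPU slice for the wrapped call; duration is the requested GPU time in seconds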
-@spaces.GPU(duration=120)
-def gpu_generate_response(processor, prompt: str, generation_params: Dict[str, Any]) -> str:
-    """GPU-accelerated response generation wrapper"""
-    try:
-        outputs = processor.pipeline(prompt, **generation_params)
-        response_text = outputs[0]["generated_text"].strip()
-        return response_text
-    except Exception as e:
-        logging.getLogger(__name__).error(f"GPU generation failed: {e}")
-        raise
-
-@spaces.GPU(duration=60)
-def gpu_model_initialization(model_class, model_name: str, **kwargs) -> Any:
-    """GPU-accelerated model initialization wrapper"""
-    return model_class.from_pretrained(model_name, **kwargs)
\ No newline at end of file
diff --git a/src/ai/speech_engine.py b/src/ai/speech_engine.py
deleted file mode 100644
index fcb2a10dc2f54eeeae8f33dfa275ee1bbd058332..0000000000000000000000000000000000000000
--- a/src/ai/speech_engine.py
+++ /dev/null
@@ -1,470 +0,0 @@
-import asyncio
-import numpy as np
-import torch
-import torchaudio
-from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
-import webrtcvad
-import logging
-from typing import Dict, List, Optional, Tuple, Any
-import time
-from dataclasses import dataclass
-import io
-import wave
-import spaces
-
-@dataclass
-class SpeechConfig:
-    model_name: str = "kyutai/stt-2.6b-en"  # Kyutai STT model
-    device: str = "auto"
-    torch_dtype: str = "float16"
-    use_vad: bool = True
-    vad_aggressiveness: int = 2  # 0-3, higher = more aggressive
-    chunk_duration_ms: int = 30  # VAD chunk size
-    sample_rate: int = 16000
-    use_pipeline: bool = True  # Use transformers pipeline for easier integration
-
-class AdvancedSpeechEngine:
-    def __init__(self, config: SpeechConfig):
-        self.config = config
-        self.logger = logging.getLogger(__name__)
-        
-        # Kyutai STT model configurations
-        self.model_info = {
-            "name": "Kyutai STT-2.6B-EN",
-            "description": "Multilingual speech-to-text model optimized for English",
-            "memory_gb": 6,  # Approximate memory requirement for 2.6B model
-            "speed": "fast",
-            "accuracy": "high"
-        }
-        
-        self.speech_pipeline = None
-        self.model = None
-        self.processor = None
-        self.vad_model = None
-        
-        # Performance tracking
-        self.transcription_times = []
-        self.accuracy_scores = []
-        
-        # Audio processing
-        self.audio_buffer = []
-        self.is_processing = False
-        
-    async def initialize(self):
-        """Initialize the Kyutai STT speech recognition system"""
-        try:
-            # Enhanced device detection for local vs Spaces environments
-            device = self.config.device
-            torch_dtype = self.config.torch_dtype
-            
-            if device == "auto":
-                # For ZeroGPU environments, try GPU first and fall back to CPU
-                if torch.cuda.is_available():
-                    try:
-                        # Test CUDA availability properly
-                        torch.cuda.current_device()
-                        torch.cuda.empty_cache()
-                        device = "cuda"
-                        self.logger.info("GPU detected and accessible for speech processing")
-                    except Exception as cuda_error:
-                        # CUDA not properly accessible, use CPU
-                        device = "cpu"
-                        if torch_dtype == "float16":
-                            torch_dtype = "float32"
-                        self.logger.info(f"CUDA not accessible ({cuda_error}), using CPU with float32")
-                else:
-                    device = "cpu"
-                    if torch_dtype == "float16":
-                        torch_dtype = "float32"
-                    self.logger.info("CUDA not available, using CPU with float32")
-            
-            # Adjust torch_dtype for CPU
-            if device == "cpu" and torch_dtype == "float16":
-                torch_dtype = "float32"  # Use float32 for CPU instead of float16
-                self.logger.info("CPU detected, switching from float16 to float32 dtype")
-            
-            # Convert string dtype to torch dtype
-            dtype_map = {
-                "float16": torch.float16,
-                "float32": torch.float32,
-                "bfloat16": torch.bfloat16
-            }
-            torch_dtype_obj = dtype_map.get(torch_dtype, torch.float32)
-            
-            # Initialize Kyutai STT with proper error handling
-            try:
-                if self.config.use_pipeline:
-                    # Use transformers pipeline for easier integration
-                    self.speech_pipeline = pipeline(
-                        "automatic-speech-recognition",
-                        model=self.config.model_name,
-                        torch_dtype=torch_dtype_obj,
-                        device=device,
-                        cache_dir="data/models/"
-                    )
-                    self.logger.info(f"Kyutai STT pipeline loaded on {device} with {torch_dtype}")
-                else:
-                    # Load model and processor separately for more control
-                    self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
-                        self.config.model_name,
-                        torch_dtype=torch_dtype_obj,
-                        device_map="auto" if device == "cuda" else None,
-                        cache_dir="data/models/"
-                    )
-                    self.processor = AutoProcessor.from_pretrained(
-                        self.config.model_name,
-                        cache_dir="data/models/"
-                    )
-                    
-                    if device == "cuda" and not hasattr(self.model, 'device_map'):
-                        self.model = self.model.to(device)
-                    
-                    self.logger.info(f"Kyutai STT model and processor loaded on {device} with {torch_dtype}")
-                    
-            except Exception as model_error:
-                # Final fallback to CPU with basic settings
-                self.logger.warning(f"Failed to load on {device}, falling back to CPU: {model_error}")
-                
-                if self.config.use_pipeline:
-                    self.speech_pipeline = pipeline(
-                        "automatic-speech-recognition",
-                        model=self.config.model_name,
-                        torch_dtype=torch.float32,
-                        device="cpu",
-                        cache_dir="data/models/"
-                    )
-                else:
-                    self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
-                        self.config.model_name,
-                        torch_dtype=torch.float32,
-                        device_map=None,
-                        cache_dir="data/models/"
-                    )
-                    self.processor = AutoProcessor.from_pretrained(
-                        self.config.model_name,
-                        cache_dir="data/models/"
-                    )
-                    self.model = self.model.to("cpu")
-                
-                self.logger.info("Kyutai STT model loaded on CPU (fallback)")
-            
-            # Initialize VAD if enabled
-            if self.config.use_vad:
-                self.vad_model = webrtcvad.Vad(self.config.vad_aggressiveness)
-            
-            self.logger.info(f"Kyutai STT speech engine initialized: {self.config.model_name} on {device}")
-            
-        except Exception as e:
-            self.logger.error(f"Failed to initialize Kyutai STT speech engine: {e}")
-            raise
-    
-    async def process_audio_stream(self, audio_data: np.ndarray) -> Dict[str, Any]:
-        """Process streaming audio for real-time transcription"""
-        start_time = time.time()
-        
-        try:
-            # Convert audio format if needed
-            if len(audio_data.shape) > 1:
-                audio_data = audio_data.mean(axis=1)  # Convert to mono
-            
-            # Normalize audio
-            audio_data = audio_data.astype(np.float32)
-            if np.max(np.abs(audio_data)) > 0:
-                audio_data = audio_data / np.max(np.abs(audio_data))
-            
-            # Voice Activity Detection
-            if self.config.use_vad:
-                has_speech = self._detect_speech_activity(audio_data)
-                if not has_speech:
-                    return {
-                        "success": True,
-                        "transcription": "",
-                        "confidence": 0.0,
-                        "processing_time": time.time() - start_time,
-                        "has_speech": False
-                    }
-            
-            # Transcribe with Kyutai STT
-            if self.config.use_pipeline and self.speech_pipeline:
-                # Use pipeline for simpler transcription
-                result = self.speech_pipeline(
-                    audio_data,
-                    generate_kwargs={
-                        "language": "en",
-                        "task": "transcribe",
-                        "max_new_tokens": 256
-                    }
-                )
-                
-                transcription = result["text"].strip()
-                # Pipeline doesn't provide confidence scores directly
-                confidence = 0.8  # Default confidence for pipeline
-                
-            else:
-                # Use model and processor for more control
-                # Prepare inputs
-                inputs = self.processor(
-                    audio_data,
-                    sampling_rate=self.config.sample_rate,
-                    return_tensors="pt"
-                )
-                
-                # Move inputs to device
-                device = next(self.model.parameters()).device
-                inputs = {k: v.to(device) for k, v in inputs.items()}
-                
-                # Generate transcription
-                with torch.no_grad():
-                    generated_tokens = self.model.generate(
-                        **inputs,
-                        language="en",
-                        task="transcribe",
-                        max_new_tokens=256,
-                        num_beams=1,  # Faster inference
-                        do_sample=False,
-                        temperature=1.0
-                    )
-                
-                # Decode transcription
-                transcription = self.processor.batch_decode(
-                    generated_tokens, 
-                    skip_special_tokens=True
-                )[0].strip()
-                
-                # Calculate confidence (simplified)
-                confidence = 0.8  # Default confidence
-            
-            processing_time = time.time() - start_time
-            self.transcription_times.append(processing_time)
-            
-            # Analyze speech characteristics
-            speech_analysis = self._analyze_speech_characteristics(audio_data, transcription)
-            
-            return {
-                "success": True,
-                "transcription": transcription,
-                "confidence": confidence,
-                "processing_time": processing_time,
-                "has_speech": True,
-                "speech_analysis": speech_analysis,
-                "detected_language": "en",  # Kyutai model is optimized for English
-                "language_probability": 1.0,
-                "model": "kyutai-stt-2.6b-en"
-            }
-            
-        except Exception as e:
-            self.logger.error(f"Audio processing failed: {e}")
-            return {
-                "success": False,
-                "transcription": "",
-                "confidence": 0.0,
-                "processing_time": time.time() - start_time,
-                "error": str(e)
-            }
-    
-    def _detect_speech_activity(self, audio_data: np.ndarray) -> bool:
-        """Detect if audio contains speech using WebRTC VAD"""
-        try:
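-            # webrtcvad accepts only 16-bit mono PCM at 8/16/32/48 kHz, in 10/20/30 ms frames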
-            # Convert to 16-bit PCM
-            pcm_data = (audio_data * 32767).astype(np.int16)
-            
-            # Split into chunks for VAD processing
-            chunk_size = int(self.config.sample_rate * self.config.chunk_duration_ms / 1000)
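-            # e.g. 30 ms at 16 kHz -> 480 samples per chunk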
-            speech_chunks = 0
-            total_chunks = 0
-            
-            for i in range(0, len(pcm_data), chunk_size):
-                chunk = pcm_data[i:i+chunk_size]
-                
-                # Pad chunk if necessary
-                if len(chunk) < chunk_size:
-                    chunk = np.pad(chunk, (0, chunk_size - len(chunk)), mode='constant')
-                
-                # Convert to bytes
-                chunk_bytes = chunk.tobytes()
-                
-                # Check for speech
-                if self.vad_model.is_speech(chunk_bytes, self.config.sample_rate):
-                    speech_chunks += 1
-                
-                total_chunks += 1
-            
-            # Consider it speech if > 30% of chunks contain speech
-            speech_ratio = speech_chunks / total_chunks if total_chunks > 0 else 0
-            return speech_ratio > 0.3
-            
-        except Exception as e:
-            self.logger.warning(f"VAD processing failed: {e}")
-            return True  # Default to processing if VAD fails
-    
-    def _logprob_to_confidence(self, avg_logprob: float) -> float:
-        """Convert log probability to confidence score"""
-        # Empirical mapping from log probability to confidence
-        # (Faster Whisper typically gives log probs between -3.0 and 0.0; unused by the Kyutai code paths above)
-        confidence = max(0.0, min(1.0, (avg_logprob + 3.0) / 3.0))
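-        # e.g. avg_logprob = -1.5 -> confidence = 0.5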
-        return confidence
-    
-    def _analyze_speech_characteristics(self, audio_data: np.ndarray, transcription: str) -> Dict[str, Any]:
-        """Analyze speech characteristics for emotional context"""
-        try:
-            import librosa
-            
-            # Basic audio features
-            duration = len(audio_data) / self.config.sample_rate
-            
-            # Energy/Volume analysis
-            rms_energy = np.sqrt(np.mean(audio_data ** 2))
-            
-            # Pitch analysis
-            pitches, magnitudes = librosa.piptrack(
-                y=audio_data, 
-                sr=self.config.sample_rate,
-                threshold=0.1
-            )
-            
-            # Extract fundamental frequency
-            pitch_values = pitches[magnitudes > np.max(magnitudes) * 0.1]
-            if len(pitch_values) > 0:
-                avg_pitch = np.mean(pitch_values)
-                pitch_variance = np.var(pitch_values)
-            else:
-                avg_pitch = 0.0
-                pitch_variance = 0.0
-            
-            # Speaking rate (words per minute)
-            word_count = len(transcription.split()) if transcription else 0
-            speaking_rate = (word_count / duration * 60) if duration > 0 else 0
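-            # e.g. 10 words over 3 s of audio -> 200 words per minute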
-            
-            # Emotional indicators (basic)
-            emotions = {
-                "excitement": min(1.0, rms_energy * 10),  # Higher energy = more excited
-                "calmness": max(0.0, 1.0 - (pitch_variance / 1000)),  # Lower pitch variance = calmer
-                "engagement": min(1.0, speaking_rate / 200),  # Normal speaking rate indicates engagement
-                "stress": min(1.0, max(0.0, (avg_pitch - 200) / 100))  # Higher pitch can indicate stress
-            }
-            
-            return {
-                "duration": duration,
-                "energy": rms_energy,
-                "average_pitch": avg_pitch,
-                "pitch_variance": pitch_variance,
-                "speaking_rate": speaking_rate,
-                "word_count": word_count,
-                "emotional_indicators": emotions
-            }
-            
-        except Exception as e:
-            self.logger.warning(f"Speech analysis failed: {e}")
-            return {
-                "duration": 0.0,
-                "energy": 0.0,
-                "emotional_indicators": {}
-            }
-    
-    async def batch_transcribe(self, audio_files: List[str]) -> List[Dict[str, Any]]:
-        """Batch transcribe multiple audio files using Kyutai STT"""
-        results = []
-        
-        for audio_file in audio_files:
-            try:
-                # Load audio file - use torchaudio for better PyTorch integration
-                audio_data, sample_rate = torchaudio.load(audio_file)
-                
-                # Convert to numpy and ensure mono
-                audio_data = audio_data.numpy()
-                if len(audio_data.shape) > 1:
-                    audio_data = audio_data.mean(axis=0)  # Convert to mono
-                
-                # Resample if necessary
-                if sample_rate != self.config.sample_rate:
-                    # Use torchaudio for resampling
-                    audio_tensor = torch.from_numpy(audio_data).unsqueeze(0)
-                    resampler = torchaudio.transforms.Resample(sample_rate, self.config.sample_rate)
-                    audio_tensor = resampler(audio_tensor)
-                    audio_data = audio_tensor.squeeze(0).numpy()
-                
-                # Process
-                result = await self.process_audio_stream(audio_data)
-                result["file_path"] = audio_file
-                result["original_sample_rate"] = sample_rate
-                
-                results.append(result)
-                
-            except Exception as e:
-                self.logger.error(f"Failed to process {audio_file}: {e}")
-                results.append({
-                    "success": False,
-                    "file_path": audio_file,
-                    "error": str(e)
-                })
-        
-        return results
-    
-    def get_performance_stats(self) -> Dict[str, Any]:
-        """Get speech processing performance statistics"""
-        if not self.transcription_times:
-            return {"status": "No transcription data available"}
-        
-        avg_time = sum(self.transcription_times) / len(self.transcription_times)
-        
-        return {
-            "average_processing_time": avg_time,
-            "total_transcriptions": len(self.transcription_times),
-            "fastest_transcription": min(self.transcription_times),
-            "slowest_transcription": max(self.transcription_times),
-            "model_config": self.config.__dict__,
-            "estimated_real_time_factor": avg_time / 1.0  # Assuming 1 second audio clips
-        }
-    
-    def optimize_for_hardware(self, available_vram_gb: float) -> SpeechConfig:
-        """Optimize Kyutai STT config based on available hardware"""
-        # Kyutai STT-2.6B requires about 6GB VRAM for optimal performance
-        if available_vram_gb >= 8:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float16",
-                use_vad=True,
-                use_pipeline=True
-            )
-        elif available_vram_gb >= 6:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=True
-            )
-        elif available_vram_gb >= 4:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=False  # More memory efficient without pipeline
-            )
-        else:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cpu",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=True
-            )
-
-# Apply GPU decorator to methods after class definition for ZeroGPU compatibility
-try:
-    import os
-    if os.getenv("SPACE_ID") is not None:
-        # We're in Spaces environment, apply GPU decorator for Kyutai STT
-        AdvancedSpeechEngine.process_audio_stream = spaces.GPU(
-            AdvancedSpeechEngine.process_audio_stream, 
-            duration=120  # Kyutai STT may take longer than Whisper
-        )
-        AdvancedSpeechEngine.batch_transcribe = spaces.GPU(
-            AdvancedSpeechEngine.batch_transcribe,
-            duration=300  # Batch processing may take longer
-        )
-except (ImportError, NotImplementedError, AttributeError) as e:
-    # GPU decorator not available or failed, continue without it
-    pass
\ No newline at end of file
diff --git a/src/core/evolution_system.py b/src/core/evolution_system.py
deleted file mode 100644
index 12349c49e7eb68a1e7b8e79d352b115367bacf98..0000000000000000000000000000000000000000
--- a/src/core/evolution_system.py
+++ /dev/null
@@ -1,655 +0,0 @@
-import asyncio
-import logging
-from typing import Dict, List, Optional, Any, Tuple
-from datetime import datetime, timedelta
-from enum import Enum
-import random
-import json
-
-from .monster_engine import Monster, EvolutionStage, MonsterPersonalityType, EmotionalState
-
-class EvolutionTrigger(str, Enum):
-    TIME_BASED = "time_based"
-    STAT_BASED = "stat_based"
-    CARE_BASED = "care_based"
-    ITEM_BASED = "item_based"
-    SPECIAL_EVENT = "special_event"
-    TRAINING_BASED = "training_based"
-    RELATIONSHIP_BASED = "relationship_based"
-
-class EvolutionPath(str, Enum):
-    NORMAL = "normal"
-    VARIANT = "variant"
-    SPECIAL = "special"
-    CORRUPTED = "corrupted"
-    LEGENDARY = "legendary"
-
-class EvolutionSystem:
-    def __init__(self):
-        self.logger = logging.getLogger(__name__)
-        
-        # Evolution trees and requirements
-        self.evolution_trees = self._initialize_evolution_trees()
-        self.evolution_requirements = self._initialize_evolution_requirements()
-        self.special_conditions = self._initialize_special_conditions()
-        
-        # Evolution modifiers
-        self.care_quality_thresholds = {
-            "excellent": 1.8,
-            "good": 1.4,
-            "average": 1.0,
-            "poor": 0.6,
-            "terrible": 0.3
-        }
-        
-    def _initialize_evolution_trees(self) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
-        """Initialize the complete evolution tree structure"""
-        return {
-            "Botamon": {
-                EvolutionStage.BABY: [
-                    {
-                        "species": "Koromon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 60,
-                            "care_mistakes_max": 0,
-                            "health_min": 80
-                        }
-                    }
-                ]
-            },
-            "Koromon": {
-                EvolutionStage.CHILD: [
-                    {
-                        "species": "Agumon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 1440,  # 24 hours
-                            "stats_min": {"offense": 150, "life": 1200},
-                            "training_min": {"strength": 30},
-                            "care_quality_min": 1.0
-                        }
-                    },
-                    {
-                        "species": "Betamon", 
-                        "path": EvolutionPath.VARIANT,
-                        "requirements": {
-                            "age_minutes": 1440,
-                            "stats_min": {"defense": 150, "brains": 120},
-                            "training_min": {"intelligence": 30},
-                            "care_quality_min": 1.2
-                        }
-                    },
-                    {
-                        "species": "Kunemon",
-                        "path": EvolutionPath.CORRUPTED,
-                        "requirements": {
-                            "age_minutes": 1440,
-                            "care_mistakes_min": 3,
-                            "happiness_max": 40
-                        }
-                    }
-                ]
-            },
-            "Agumon": {
-                EvolutionStage.ADULT: [
-                    {
-                        "species": "Greymon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 4320,  # 72 hours
-                            "stats_min": {"offense": 250, "life": 1800},
-                            "training_min": {"strength": 80},
-                            "care_quality_min": 1.3,
-                            "battle_wins_min": 5
-                        }
-                    },
-                    {
-                        "species": "Tyrannomon",
-                        "path": EvolutionPath.VARIANT,
-                        "requirements": {
-                            "age_minutes": 4320,
-                            "stats_min": {"offense": 300, "life": 2000},
-                            "training_min": {"strength": 100, "endurance": 50},
-                            "care_quality_min": 1.1,
-                            "discipline_min": 70
-                        }
-                    },
-                    {
-                        "species": "Meramon",
-                        "path": EvolutionPath.SPECIAL,
-                        "requirements": {
-                            "age_minutes": 4320,
-                            "stats_min": {"offense": 200, "brains": 180},
-                            "training_min": {"spirit": 60},
-                            "special_item": "Fire_Crystal",
-                            "care_quality_min": 1.5
-                        }
-                    }
-                ]
-            },
-            "Greymon": {
-                EvolutionStage.PERFECT: [
-                    {
-                        "species": "MetalGreymon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 8640,  # 144 hours (6 days)
-                            "stats_min": {"offense": 400, "life": 2800, "defense": 300},
-                            "training_min": {"strength": 150, "technique": 100},
-                            "care_quality_min": 1.6,
-                            "battle_wins_min": 15,
-                            "relationship_level_min": 80
-                        }
-                    },
-                    {
-                        "species": "SkullGreymon",
-                        "path": EvolutionPath.CORRUPTED,
-                        "requirements": {
-                            "age_minutes": 8640,
-                            "stats_min": {"offense": 450},
-                            "care_mistakes_min": 8,
-                            "overtraining": True,
-                            "happiness_max": 30
-                        }
-                    }
-                ]
-            },
-            "MetalGreymon": {
-                EvolutionStage.ULTIMATE: [
-                    {
-                        "species": "WarGreymon",
-                        "path": EvolutionPath.LEGENDARY,
-                        "requirements": {
-                            "age_minutes": 14400,  # 10 days
-                            "stats_min": {"offense": 600, "life": 4000, "defense": 500, "brains": 400},
-                            "training_min": {"strength": 200, "technique": 150, "spirit": 100},
-                            "care_quality_min": 1.8,
-                            "battle_wins_min": 50,
-                            "relationship_level_min": 95,
-                            "special_achievements": ["Perfect_Care_Week", "Master_Trainer"]
-                        }
-                    }
-                ]
-            }
-        }
-    
-    def _initialize_evolution_requirements(self) -> Dict[str, Any]:
-        """Initialize detailed evolution requirement checkers"""
-        return {
-            "age_requirements": {
-                "check": lambda monster, req: monster.lifecycle.age_minutes >= req,
-                "display": lambda req: f"Age: {req/1440:.1f} days"
-            },
-            "stat_requirements": {
-                "check": self._check_stat_requirements,
-                "display": lambda req: f"Stats: {', '.join([f'{k}≥{v}' for k, v in req.items()])}"
-            },
-            "training_requirements": {
-                "check": self._check_training_requirements,
-                "display": lambda req: f"Training: {', '.join([f'{k}≥{v}' for k, v in req.items()])}"
-            },
-            "care_quality_requirements": {
-                "check": lambda monster, req: monster.stats.care_quality_score >= req,
-                "display": lambda req: f"Care Quality: {req:.1f}"
-            },
-            "item_requirements": {
-                "check": self._check_item_requirements,
-                "display": lambda req: f"Required Item: {req}"
-            },
-            "special_requirements": {
-                "check": self._check_stat_requirements,
-                "display": lambda req: f"Special: {', '.join(req) if isinstance(req, list) else req}"
-            }
-        }
-    
-    def _initialize_special_conditions(self) -> Dict[str, Any]:
-        """Initialize special evolution conditions"""
-        return {
-            "perfect_care_week": {
-                "description": "No care mistakes for 7 consecutive days",
-                "check": self._check_perfect_care_week
-            },
-            "master_trainer": {
-                "description": "Complete all training types to level 150+",
-                "check": self._check_master_trainer
-            },
-            "bond_master": {
-                "description": "Reach maximum relationship level",
-                "check": lambda monster: monster.personality.relationship_level >= 100
-            },
-            "evolution_master": {
-                "description": "Successfully evolve 10+ monsters",
-                "check": self._check_evolution_master
-            },
-            "overtraining": {
-                "description": "Training stats significantly exceed normal limits",
-                "check": self._check_overtraining
-            }
-        }
-    
-    async def check_evolution_eligibility(self, monster: Monster) -> Dict[str, Any]:
-        """Check if monster is eligible for evolution and return detailed info"""
-        try:
-            current_species = monster.species
-            current_stage = monster.lifecycle.stage
-            
-            # Get possible evolutions
-            possible_evolutions = self.evolution_trees.get(current_species, {}).get(current_stage, [])
-            
-            if not possible_evolutions:
-                return {
-                    "can_evolve": False,
-                    "reason": "No evolution paths available",
-                    "possible_evolutions": []
-                }
-            
-            evolution_results = []
-            
-            for evolution_option in possible_evolutions:
-                species = evolution_option["species"]
-                path = evolution_option["path"]
-                requirements = evolution_option["requirements"]
-                
-                # Check each requirement
-                met_requirements = []
-                missing_requirements = []
-                
-                for req_type, req_value in requirements.items():
-                    is_met = await self._check_requirement(monster, req_type, req_value)
-                    
-                    requirement_info = {
-                        "type": req_type,
-                        "requirement": req_value,
-                        "current_value": self._get_current_value(monster, req_type),
-                        "is_met": is_met
-                    }
-                    
-                    if is_met:
-                        met_requirements.append(requirement_info)
-                    else:
-                        missing_requirements.append(requirement_info)
-                
-                # Calculate evolution readiness percentage
-                total_requirements = len(met_requirements) + len(missing_requirements)
-                readiness_percentage = (len(met_requirements) / total_requirements * 100) if total_requirements > 0 else 0
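-                # e.g. 3 of 4 requirements met -> 75% ready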
-                
-                evolution_results.append({
-                    "species": species,
-                    "path": path.value,
-                    "readiness_percentage": readiness_percentage,
-                    "can_evolve": len(missing_requirements) == 0,
-                    "met_requirements": met_requirements,
-                    "missing_requirements": missing_requirements,
-                    "estimated_time_to_eligible": self._estimate_time_to_eligible(missing_requirements)
-                })
-            
-            # Find the best evolution option
-            eligible_evolutions = [e for e in evolution_results if e["can_evolve"]]
-            best_option = max(evolution_results, key=lambda x: x["readiness_percentage"]) if evolution_results else None
-            
-            return {
-                "can_evolve": len(eligible_evolutions) > 0,
-                "eligible_evolutions": eligible_evolutions,
-                "best_option": best_option,
-                "all_options": evolution_results,
-                "evolution_locked": monster.lifecycle.evolution_locked_until and 
-                                  monster.lifecycle.evolution_locked_until > datetime.now()
-            }
-            
-        except Exception as e:
-            self.logger.error(f"Evolution eligibility check failed: {e}")
-            return {
-                "can_evolve": False,
-                "reason": f"Error checking evolution: {str(e)}",
-                "possible_evolutions": []
-            }
-    
-    async def trigger_evolution(self, monster: Monster, target_species: str = None) -> Dict[str, Any]:
-        """Trigger monster evolution"""
-        try:
-            # Check if evolution is locked
-            if monster.lifecycle.evolution_locked_until and monster.lifecycle.evolution_locked_until > datetime.now():
-                return {
-                    "success": False,
-                    "reason": "Evolution is temporarily locked",
-                    "unlock_time": monster.lifecycle.evolution_locked_until
-                }
-            
-            # Get evolution eligibility
-            eligibility = await self.check_evolution_eligibility(monster)
-            
-            if not eligibility["can_evolve"]:
-                return {
-                    "success": False,
-                    "reason": "Evolution requirements not met",
-                    "eligibility": eligibility
-                }
-            
-            # Select evolution target
-            eligible_evolutions = eligibility["eligible_evolutions"]
-            
-            if target_species:
-                # Specific evolution requested
-                target_evolution = next((e for e in eligible_evolutions if e["species"] == target_species), None)
-                if not target_evolution:
-                    return {
-                        "success": False,
-                        "reason": f"Cannot evolve to {target_species}",
-                        "available_options": [e["species"] for e in eligible_evolutions]
-                    }
-            else:
-                # Choose best available evolution
-                target_evolution = max(eligible_evolutions, key=lambda x: x["readiness_percentage"])
-            
-            # Store previous state
-            previous_species = monster.species
-            previous_stage = monster.lifecycle.stage
-            
-            # Apply evolution
-            await self._apply_evolution(monster, target_evolution)
-            
-            # Log evolution event
-            evolution_result = {
-                "success": True,
-                "previous_species": previous_species,
-                "previous_stage": previous_stage.value,
-                "new_species": monster.species,
-                "new_stage": monster.lifecycle.stage.value,
-                "evolution_path": target_evolution["path"],
-                "stat_bonuses": self._calculate_evolution_bonuses(target_evolution),
-                "timestamp": datetime.now()
-            }
-            
-            self.logger.info(f"Monster evolved: {previous_species} -> {monster.species}")
-            
-            return evolution_result
-            
-        except Exception as e:
-            self.logger.error(f"Evolution trigger failed: {e}")
-            return {
-                "success": False,
-                "reason": f"Evolution failed: {str(e)}"
-            }
-    
-    async def _apply_evolution(self, monster: Monster, evolution_data: Dict[str, Any]):
-        """Apply evolution changes to monster"""
-        # Update basic info
-        monster.species = evolution_data["species"]
-        
-        # Determine new stage
-        stage_progression = {
-            EvolutionStage.EGG: EvolutionStage.BABY,
-            EvolutionStage.BABY: EvolutionStage.CHILD,
-            EvolutionStage.CHILD: EvolutionStage.ADULT,
-            EvolutionStage.ADULT: EvolutionStage.PERFECT,
-            EvolutionStage.PERFECT: EvolutionStage.ULTIMATE,
-            EvolutionStage.ULTIMATE: EvolutionStage.MEGA
-        }
-        
-        new_stage = stage_progression.get(monster.lifecycle.stage)
-        if new_stage:
-            monster.lifecycle.stage = new_stage
-        
-        # Apply stat bonuses
-        bonuses = self._calculate_evolution_bonuses(evolution_data)
-        for stat, bonus in bonuses.items():
-            if hasattr(monster.stats, stat):
-                current_value = getattr(monster.stats, stat)
-                new_value = int(current_value * bonus["multiplier"]) + bonus["flat_bonus"]
-                setattr(monster.stats, stat, new_value)
-        
-        # Reset some care stats
-        monster.stats.happiness = min(100, monster.stats.happiness + 20)
-        monster.stats.health = min(100, monster.stats.health + 30)
-        monster.stats.energy = min(100, monster.stats.energy + 40)
-        
-        # Update personality based on evolution path
-        self._apply_personality_changes(monster, evolution_data["path"])
-        
-        # Set evolution cooldown
-        monster.lifecycle.evolution_locked_until = datetime.now() + timedelta(hours=24)
-        
-        # Update emotional state
-        monster.emotional_state = EmotionalState.ECSTATIC
-        
-        # Add evolution achievement
-        if "special_achievements" not in monster.performance_metrics:
-            monster.performance_metrics["special_achievements"] = []
-        
-        monster.performance_metrics["special_achievements"].append({
-            "type": "evolution",
-            "species": monster.species,
-            "timestamp": datetime.now().isoformat()
-        })
-    
-    def _calculate_evolution_bonuses(self, evolution_data: Dict[str, Any]) -> Dict[str, Dict[str, float]]:
-        """Calculate stat bonuses for evolution"""
-        base_bonuses = {
-            "life": {"multiplier": 1.3, "flat_bonus": 200},
-            "mp": {"multiplier": 1.2, "flat_bonus": 50},
-            "offense": {"multiplier": 1.25, "flat_bonus": 30},
-            "defense": {"multiplier": 1.25, "flat_bonus": 30},
-            "speed": {"multiplier": 1.2, "flat_bonus": 20},
-            "brains": {"multiplier": 1.15, "flat_bonus": 25}
-        }
-        
-        # Modify bonuses based on evolution path
-        path_modifiers = {
-            EvolutionPath.NORMAL: 1.0,
-            EvolutionPath.VARIANT: 1.1,
-            EvolutionPath.SPECIAL: 1.3,
-            EvolutionPath.CORRUPTED: 0.9,
-            EvolutionPath.LEGENDARY: 1.5
-        }
-        
-        evolution_path = EvolutionPath(evolution_data["path"])
-        modifier = path_modifiers.get(evolution_path, 1.0)
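-        # e.g. a LEGENDARY path turns the base life multiplier 1.3 into 1.3 * 1.5 = 1.95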
-        
-        # Apply modifier to bonuses
-        modified_bonuses = {}
-        for stat, bonus in base_bonuses.items():
-            modified_bonuses[stat] = {
-                "multiplier": bonus["multiplier"] * modifier,
-                "flat_bonus": int(bonus["flat_bonus"] * modifier)
-            }
-        
-        return modified_bonuses
-    
-    def _apply_personality_changes(self, monster: Monster, evolution_path: str):
-        """Apply personality changes based on evolution path"""
-        path_personality_effects = {
-            EvolutionPath.NORMAL: {
-                "conscientiousness": 0.05,
-                "stability": 0.03
-            },
-            EvolutionPath.VARIANT: {
-                "openness": 0.08,
-                "curiosity": 0.05
-            },
-            EvolutionPath.SPECIAL: {
-                "extraversion": 0.1,
-                "confidence": 0.07
-            },
-            EvolutionPath.CORRUPTED: {
-                "neuroticism": 0.15,
-                "aggression": 0.1,
-                "happiness_decay_rate": 1.2
-            },
-            EvolutionPath.LEGENDARY: {
-                "all_traits": 0.1,
-                "relationship_bonus": 10
-            }
-        }
-        
-        effects = path_personality_effects.get(EvolutionPath(evolution_path), {})
-        
-        for trait, change in effects.items():
-            if trait == "all_traits":
-                # Boost all personality traits
-                for personality_trait in ["openness", "conscientiousness", "extraversion", "agreeableness"]:
-                    if hasattr(monster.personality, personality_trait):
-                        current = getattr(monster.personality, personality_trait)
-                        setattr(monster.personality, personality_trait, min(1.0, current + change))
-            elif trait == "relationship_bonus":
-                monster.personality.relationship_level = min(100, monster.personality.relationship_level + change)
-            elif hasattr(monster.personality, trait):
-                current = getattr(monster.personality, trait)
-                setattr(monster.personality, trait, min(1.0, max(0.0, current + change)))
-    
-    async def _check_requirement(self, monster: Monster, req_type: str, req_value: Any) -> bool:
-        """Check if a specific requirement is met"""
-        try:
-            if req_type == "age_minutes":
-                return monster.lifecycle.age_minutes >= req_value
-            
-            elif req_type == "care_mistakes_max":
-                return monster.lifecycle.care_mistakes <= req_value
-            
-            elif req_type == "care_mistakes_min":
-                return monster.lifecycle.care_mistakes >= req_value
-            
-            elif req_type == "stats_min":
-                return self._check_stat_requirements(monster, req_value)
-            
-            elif req_type == "training_min":
-                return self._check_training_requirements(monster, req_value)
-            
-            elif req_type == "care_quality_min":
-                return monster.stats.care_quality_score >= req_value
-            
-            elif req_type == "health_min":
-                return monster.stats.health >= req_value
-            
-            elif req_type == "happiness_max":
-                return monster.stats.happiness <= req_value
-            
-            elif req_type == "happiness_min":
-                return monster.stats.happiness >= req_value
-            
-            elif req_type == "discipline_min":
-                return monster.stats.discipline >= req_value
-            
-            elif req_type == "relationship_level_min":
-                return monster.personality.relationship_level >= req_value
-            
-            elif req_type == "special_item":
-                return req_value in monster.inventory and monster.inventory[req_value] > 0
-            
-            elif req_type == "special_achievements":
-                return self._check_special_achievements(monster, req_value)
-            
-            elif req_type == "battle_wins_min":
-                return monster.performance_metrics.get("battle_wins", 0) >= req_value
-            
-            elif req_type == "overtraining":
-                return self._check_overtraining(monster)
-            
-            else:
-                self.logger.warning(f"Unknown requirement type: {req_type}")
-                return False
-                
-        except Exception as e:
-            self.logger.error(f"Requirement check failed for {req_type}: {e}")
-            return False
-    
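-    # Illustrative sketch (hypothetical values, not from the shipped requirement
-    # tables): a caller would await the dispatcher once per requirement entry:
-    #
-    #     reqs = {"age_minutes": 4320, "stats_min": {"life": 1500}}
-    #     results = [await self._check_requirement(monster, k, v)
-    #                for k, v in reqs.items()]
-    #
-    # A monster aged 5000 minutes with life 1400 passes "age_minutes"
-    # (5000 >= 4320) but fails "stats_min" (1400 < 1500).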
-    def _check_stat_requirements(self, monster: Monster, requirements: Dict[str, int]) -> bool:
-        """Check if stat requirements are met"""
-        for stat_name, min_value in requirements.items():
-            if hasattr(monster.stats, stat_name):
-                current_value = getattr(monster.stats, stat_name)
-                if current_value < min_value:
-                    return False
-            else:
-                return False
-        return True
-    
-    def _check_training_requirements(self, monster: Monster, requirements: Dict[str, int]) -> bool:
-        """Check if training requirements are met"""
-        for training_type, min_value in requirements.items():
-            current_value = monster.stats.training_progress.get(training_type, 0)
-            if current_value < min_value:
-                return False
-        return True
-    
-    def _check_item_requirements(self, monster: Monster, item_name: str) -> bool:
-        """Check if monster has required item"""
-        return item_name in monster.inventory and monster.inventory[item_name] > 0
-    
-    def _check_special_achievements(self, monster: Monster, required_achievements: List[str]) -> bool:
-        """Check if special achievements are unlocked"""
-        achievements = monster.performance_metrics.get("special_achievements", [])
-        achievement_types = [a.get("type") for a in achievements if isinstance(a, dict)]
-        
-        for required in required_achievements:
-            if required not in achievement_types:
-                return False
-        return True
-    
-    def _check_overtraining(self, monster: Monster) -> bool:
-        """Check if monster is overtrained"""
-        training_totals = sum(monster.stats.training_progress.values())
-        return training_totals > 800  # Threshold for overtraining
-    
-    def _check_perfect_care_week(self, monster: Monster) -> bool:
-        """Check if monster had perfect care for a week"""
-        # Simplified check - would need more complex tracking in production
-        return monster.lifecycle.care_mistakes == 0 and monster.lifecycle.age_minutes >= 10080  # 7 days
-    
-    def _check_master_trainer(self, monster: Monster) -> bool:
-        """Check if all training types are at 150+"""
-        for training_type in ["strength", "endurance", "intelligence", "dexterity", "spirit", "technique"]:
-            if monster.stats.training_progress.get(training_type, 0) < 150:
-                return False
-        return True
-    
-    def _check_evolution_master(self, monster: Monster) -> bool:
-        """Check if player has evolved many monsters"""
-        # This would need global tracking in production
-        evolutions = [a for a in monster.performance_metrics.get("special_achievements", []) 
-                     if isinstance(a, dict) and a.get("type") == "evolution"]
-        return len(evolutions) >= 10
-    
-    def _get_current_value(self, monster: Monster, req_type: str) -> Any:
-        """Get current value for a requirement type"""
-        value_getters = {
-            "age_minutes": lambda: monster.lifecycle.age_minutes,
-            "care_mistakes_max": lambda: monster.lifecycle.care_mistakes,
-            "care_mistakes_min": lambda: monster.lifecycle.care_mistakes,
-            "health_min": lambda: monster.stats.health,
-            "happiness_max": lambda: monster.stats.happiness,
-            "happiness_min": lambda: monster.stats.happiness,
-            "discipline_min": lambda: monster.stats.discipline,
-            "care_quality_min": lambda: monster.stats.care_quality_score,
-            "relationship_level_min": lambda: monster.personality.relationship_level,
-            "battle_wins_min": lambda: monster.performance_metrics.get("battle_wins", 0)
-        }
-        
-        getter = value_getters.get(req_type)
-        return getter() if getter else "N/A"
-    
-    def _estimate_time_to_eligible(self, missing_requirements: List[Dict[str, Any]]) -> str:
-        """Estimate time until evolution requirements are met"""
-        time_estimates = []
-        
-        for req in missing_requirements:
-            req_type = req["type"]
-            
-            if req_type == "age_minutes":
-                current = req["current_value"]
-                required = req["requirement"]
-                remaining_minutes = required - current
-                time_estimates.append(f"{remaining_minutes/1440:.1f} days")
-            
-            elif "training" in req_type:
-                # Estimate based on training rate
-                time_estimates.append("1-3 days of training")
-            
-            elif "stat" in req_type:
-                # Estimate based on training and care
-                time_estimates.append("2-5 days of care/training")
-            
-            else:
-                time_estimates.append("Variable")
-        
-        return ", ".join(time_estimates) if time_estimates else "Ready now"
\ No newline at end of file
diff --git a/src/core/monster_engine.py b/src/core/monster_engine.py
deleted file mode 100644
index 93bd024bc0b380a1b73c44e69e274d08a1adaf7f..0000000000000000000000000000000000000000
--- a/src/core/monster_engine.py
+++ /dev/null
@@ -1,488 +0,0 @@
-from pydantic import BaseModel, Field, validator
-from typing import Dict, List, Optional, Any, Union
-from datetime import datetime, timedelta
-from enum import Enum
-import uuid
-import asyncio
-import json
-import numpy as np
-from dataclasses import dataclass
-
-class EvolutionStage(str, Enum):
-    EGG = "egg"
-    BABY = "baby"
-    CHILD = "child"
-    ADULT = "adult"
-    PERFECT = "perfect"
-    ULTIMATE = "ultimate"
-    MEGA = "mega"
-
-class MonsterPersonalityType(str, Enum):
-    PLAYFUL = "playful"
-    SERIOUS = "serious"
-    CURIOUS = "curious"
-    GENTLE = "gentle"
-    ENERGETIC = "energetic"
-    CALM = "calm"
-    MISCHIEVOUS = "mischievous"
-    LOYAL = "loyal"
-
-class EmotionalState(str, Enum):
-    ECSTATIC = "ecstatic"
-    HAPPY = "happy"
-    CONTENT = "content"
-    NEUTRAL = "neutral"
-    MELANCHOLY = "melancholy"
-    SAD = "sad"
-    ANGRY = "angry"
-    SICK = "sick"
-    EXCITED = "excited"
-    TIRED = "tired"
-
-@dataclass
-class StatBonus:
-    multiplier: float = 1.0
-    flat_bonus: int = 0
-    duration_minutes: int = 0
-    source: str = ""
-
-class AdvancedMonsterStats(BaseModel):
-    # Primary Care Stats (0-100)
-    health: int = Field(default=100, ge=0, le=100)
-    hunger: int = Field(default=100, ge=0, le=100)
-    happiness: int = Field(default=100, ge=0, le=100)
-    energy: int = Field(default=100, ge=0, le=100)
-    discipline: int = Field(default=50, ge=0, le=100)
-    cleanliness: int = Field(default=100, ge=0, le=100)
-    
-    # Battle Stats (Digimon World 1 inspired)
-    life: int = Field(default=1000, ge=0)
-    mp: int = Field(default=100, ge=0)
-    offense: int = Field(default=100, ge=0)
-    defense: int = Field(default=100, ge=0)
-    speed: int = Field(default=100, ge=0)
-    brains: int = Field(default=100, ge=0)
-    
-    # Training Progress
-    training_progress: Dict[str, int] = Field(default_factory=lambda: {
-        "strength": 0,
-        "endurance": 0,
-        "intelligence": 0,
-        "dexterity": 0,
-        "spirit": 0,
-        "technique": 0
-    })
-    
-    # Active Bonuses
-    active_bonuses: List[StatBonus] = Field(default_factory=list)
-    
-    # Performance Metrics
-    care_quality_score: float = Field(default=1.0, ge=0.0, le=2.0)
-    evolution_potential: float = Field(default=1.0, ge=0.0, le=2.0)
-
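-# Illustrative sketch (an assumption, since this engine does not pin down how
-# active_bonuses are consumed): one way StatBonus entries could combine into an
-# effective stat value. Not part of the original module.
-def _sketch_effective_offense(stats: AdvancedMonsterStats) -> int:
-    value = float(stats.offense)
-    for bonus in stats.active_bonuses:
-        # Multiply first, then add the flat part, per the StatBonus fields above
-        value = value * bonus.multiplier + bonus.flat_bonus
-    return int(value)
-
-# e.g. offense=100 with StatBonus(multiplier=1.2, flat_bonus=10) yields 130
-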
-class AIPersonality(BaseModel):
-    # Core Personality Traits
-    primary_type: MonsterPersonalityType = Field(default=MonsterPersonalityType.PLAYFUL)
-    secondary_type: Optional[MonsterPersonalityType] = Field(default=None)
-    
-    # Trait Values (0.0-1.0)
-    openness: float = Field(default=0.5, ge=0.0, le=1.0)
-    conscientiousness: float = Field(default=0.5, ge=0.0, le=1.0)
-    extraversion: float = Field(default=0.5, ge=0.0, le=1.0)
-    agreeableness: float = Field(default=0.5, ge=0.0, le=1.0)
-    neuroticism: float = Field(default=0.5, ge=0.0, le=1.0)
-    
-    # Learned Preferences
-    favorite_foods: List[str] = Field(default_factory=list)
-    disliked_foods: List[str] = Field(default_factory=list)
-    preferred_activities: List[str] = Field(default_factory=list)
-    communication_style: str = Field(default="friendly")
-    
-    # Emotional Memory
-    emotional_memories: List[Dict[str, Any]] = Field(default_factory=list)
-    relationship_level: int = Field(default=0, ge=0, le=100)
-
-class ConversationContext(BaseModel):
-    # Recent Conversation History
-    messages: List[Dict[str, Any]] = Field(default_factory=list)
-    
-    # Context Compression
-    personality_summary: str = Field(default="")
-    relationship_summary: str = Field(default="")
-    recent_events_summary: str = Field(default="")
-    
-    # Interaction Statistics
-    total_conversations: int = Field(default=0)
-    avg_conversation_length: float = Field(default=0.0)
-    last_interaction: Optional[datetime] = Field(default=None)
-    interaction_frequency: float = Field(default=0.0)  # interactions per day
-    
-    # Emotional Context
-    current_mood_factors: Dict[str, float] = Field(default_factory=dict)
-    mood_history: List[Dict[str, Any]] = Field(default_factory=list)
-
-class AdvancedLifecycle(BaseModel):
-    # Time Tracking
-    age_minutes: float = Field(default=0.0)
-    stage: EvolutionStage = Field(default=EvolutionStage.EGG)
-    generation: int = Field(default=1)
-    
-    # Care History
-    care_mistakes: int = Field(default=0)
-    perfect_care_streaks: int = Field(default=0)
-    total_training_sessions: int = Field(default=0)
-    
-    # Evolution Data
-    evolution_requirements_met: List[str] = Field(default_factory=list)
-    evolution_locked_until: Optional[datetime] = Field(default=None)
-    special_evolution_conditions: Dict[str, bool] = Field(default_factory=dict)
-    
-    # Lifespan Management
-    base_lifespan_minutes: float = Field(default=21600.0)  # 15 days
-    lifespan_modifiers: List[float] = Field(default_factory=list)
-    death_prevention_items: int = Field(default=0)
-
-class Monster(BaseModel):
-    # Identity
-    id: str = Field(default_factory=lambda: str(uuid.uuid4()))
-    name: str = Field(default="DigiPal")
-    species: str = Field(default="Botamon")
-    variant: Optional[str] = Field(default=None)
-    
-    # Core Systems
-    stats: AdvancedMonsterStats = Field(default_factory=AdvancedMonsterStats)
-    personality: AIPersonality = Field(default_factory=AIPersonality)
-    lifecycle: AdvancedLifecycle = Field(default_factory=AdvancedLifecycle)
-    conversation: ConversationContext = Field(default_factory=ConversationContext)
-    
-    # Current State
-    emotional_state: EmotionalState = Field(default=EmotionalState.CONTENT)
-    current_activity: str = Field(default="idle")
-    location: str = Field(default="nursery")
-    
-    # Timestamps
-    created_at: datetime = Field(default_factory=datetime.now)
-    last_update: datetime = Field(default_factory=datetime.now)
-    last_interaction: Optional[datetime] = Field(default=None)
-    
-    # Items and Inventory
-    inventory: Dict[str, int] = Field(default_factory=dict)
-    
-    # 3D Model
-    model_url: Optional[str] = Field(default=None)
-    equipped_items: Dict[str, str] = Field(default_factory=dict)
-    
-    # Conversation History for API
-    conversation_history: List[Dict[str, Any]] = Field(default_factory=list)
-    
-    # Breeding and Genetics
-    genetic_markers: Dict[str, Any] = Field(default_factory=dict)
-    parent_ids: List[str] = Field(default_factory=list)
-    offspring_ids: List[str] = Field(default_factory=list)
-    
-    # Performance Tracking
-    performance_metrics: Dict[str, float] = Field(default_factory=dict)
-    
-    class Config:
-        json_encoders = {
-            datetime: lambda v: v.isoformat()
-        }
-    
-    def __init__(self, **data):
-        # Allow passing a bare MonsterPersonalityType; note AIPersonality's
-        # field is primary_type, not type
-        if 'personality' in data and isinstance(data['personality'], MonsterPersonalityType):
-            personality_type = data.pop('personality')
-            data['personality'] = AIPersonality(primary_type=personality_type)
-        super().__init__(**data)
-    
-    def calculate_emotional_state(self) -> EmotionalState:
-        """Calculate current emotional state based on multiple factors"""
-        # Health-based emotions
-        if self.stats.health < 20:
-            return EmotionalState.SICK
-        
-        # Happiness-based emotions
-        if self.stats.happiness >= 95:
-            return EmotionalState.ECSTATIC
-        elif self.stats.happiness >= 80:
-            return EmotionalState.HAPPY
-        elif self.stats.happiness >= 60:
-            return EmotionalState.CONTENT
-        elif self.stats.happiness >= 40:
-            return EmotionalState.NEUTRAL
-        elif self.stats.happiness >= 20:
-            return EmotionalState.MELANCHOLY
-        elif self.stats.happiness >= 10:
-            return EmotionalState.SAD
-        
-        # Energy-based emotions
-        if self.stats.energy < 20:
-            return EmotionalState.TIRED
-        
-        # Discipline-based emotions
-        if self.stats.discipline < 20 and self.stats.hunger > 80:
-            return EmotionalState.ANGRY
-        
-        # Special conditions
-        if self.current_activity in ["training", "playing"]:
-            return EmotionalState.EXCITED
-        
-        return EmotionalState.NEUTRAL
-    
-    def get_evolution_readiness(self) -> Dict[str, Any]:
-        """Calculate evolution readiness and requirements"""
-        current_requirements = self._get_stage_requirements()
-        met_requirements = []
-        missing_requirements = []
-        
-        for req_type, requirement in current_requirements.items():
-            if self._check_requirement(req_type, requirement):
-                met_requirements.append(req_type)
-            else:
-                missing_requirements.append({
-                    "type": req_type,
-                    "requirement": requirement,
-                    "current": self._get_current_value(req_type)
-                })
-        
-        readiness_percentage = len(met_requirements) / len(current_requirements) * 100 if current_requirements else 0
-        
-        return {
-            "readiness_percentage": readiness_percentage,
-            "met_requirements": met_requirements,
-            "missing_requirements": missing_requirements,
-            "can_evolve": len(missing_requirements) == 0,
-            "next_stage": self._get_next_evolution_stage()
-        }
-    
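-    # Worked example (hypothetical monster): a BABY with age_minutes=2000,
-    # care_mistakes=1 and life=1100 meets "age_minutes" (2000 >= 1440) and
-    # "care_mistakes_max" (1 <= 2) but misses "stats_min" (life 1100 < 1200),
-    # so readiness_percentage = 2/3 * 100 ≈ 66.7 and can_evolve is False.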
-    def _get_stage_requirements(self) -> Dict[str, Any]:
-        """Get evolution requirements for current stage"""
-        requirements = {
-            EvolutionStage.EGG: {
-                "age_minutes": 60,  # 1 hour
-                "care_mistakes_max": 0
-            },
-            EvolutionStage.BABY: {
-                "age_minutes": 1440,  # 24 hours
-                "stats_min": {"life": 1200, "offense": 120, "defense": 120},
-                "care_mistakes_max": 2
-            },
-            EvolutionStage.CHILD: {
-                "age_minutes": 4320,  # 72 hours
-                "stats_min": {"life": 1500, "offense": 150, "defense": 150, "brains": 150},
-                "care_mistakes_max": 5,
-                "training_min": {"strength": 50, "intelligence": 50}
-            },
-            EvolutionStage.ADULT: {
-                "age_minutes": 8640,  # 144 hours (6 days)
-                "stats_min": {"life": 2000, "offense": 200, "defense": 200, "brains": 200},
-                "care_mistakes_max": 8,
-                "training_min": {"strength": 100, "intelligence": 100},
-                "care_quality_min": 1.2
-            }
-        }
-        return requirements.get(self.lifecycle.stage, {})
-    
-    def _check_requirement(self, req_type: str, requirement: Any) -> bool:
-        """Check if a specific requirement is met"""
-        if req_type == "age_minutes":
-            return self.lifecycle.age_minutes >= requirement
-        elif req_type == "care_mistakes_max":
-            return self.lifecycle.care_mistakes <= requirement
-        elif req_type == "stats_min":
-            for stat, min_val in requirement.items():
-                if getattr(self.stats, stat, 0) < min_val:
-                    return False
-            return True
-        elif req_type == "training_min":
-            for training, min_val in requirement.items():
-                if self.stats.training_progress.get(training, 0) < min_val:
-                    return False
-            return True
-        elif req_type == "care_quality_min":
-            return self.stats.care_quality_score >= requirement
-        return False
-    
-    def _get_current_value(self, req_type: str) -> Any:
-        """Get current value for a requirement type"""
-        if req_type == "age_minutes":
-            return self.lifecycle.age_minutes
-        elif req_type == "care_mistakes_max":
-            return self.lifecycle.care_mistakes
-        elif req_type.startswith("stats_"):
-            return {stat: getattr(self.stats, stat, 0) for stat in ["life", "offense", "defense", "brains"]}
-        elif req_type.startswith("training_"):
-            return self.stats.training_progress
-        elif req_type == "care_quality_min":
-            return self.stats.care_quality_score
-        return None
-    
-    def _get_next_evolution_stage(self) -> Optional[EvolutionStage]:
-        """Get the next evolution stage"""
-        stage_order = [
-            EvolutionStage.EGG,
-            EvolutionStage.BABY,
-            EvolutionStage.CHILD,
-            EvolutionStage.ADULT,
-            EvolutionStage.PERFECT,
-            EvolutionStage.ULTIMATE,
-            EvolutionStage.MEGA
-        ]
-        
-        current_index = stage_order.index(self.lifecycle.stage)
-        if current_index < len(stage_order) - 1:
-            return stage_order[current_index + 1]
-        return None
-    
-    def apply_time_effects(self, minutes_elapsed: float):
-        """Apply time-based effects to monster"""
-        # Age progression
-        self.lifecycle.age_minutes += minutes_elapsed
-        
-        # Stat decay rates (per hour)
-        decay_rates = {
-            "hunger": 2.0,
-            "happiness": 0.8,
-            "energy": 1.2,
-            "cleanliness": 0.6,
-            "discipline": 0.2
-        }
-        
-        # Apply decay
-        hours_elapsed = minutes_elapsed / 60.0
-        for stat, rate in decay_rates.items():
-            current_value = getattr(self.stats, stat)
-            decay_amount = rate * hours_elapsed
-            
-            # Apply personality modifiers
-            if stat == "happiness" and self.personality.neuroticism > 0.7:
-                decay_amount *= 1.3
-            if stat == "energy" and self.personality.extraversion < 0.3:
-                decay_amount *= 0.8
-            
-            new_value = max(0, current_value - decay_amount)
-            setattr(self.stats, stat, int(new_value))
-        
-        # Health effects from poor care
-        if self.stats.hunger < 20:
-            health_loss = hours_elapsed * 3
-            self.stats.health = max(0, self.stats.health - int(health_loss))
-            self.lifecycle.care_mistakes += 1
-        
-        if self.stats.cleanliness < 30:
-            health_loss = hours_elapsed * 1.5
-            self.stats.health = max(0, self.stats.health - int(health_loss))
-        
-        # Update emotional state
-        self.emotional_state = self.calculate_emotional_state()
-        self.last_update = datetime.now()
-    
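-    # Worked example: 120 minutes elapsed is 2.0 hours, so hunger drops by
-    # 2.0 * 2 = 4 and happiness by 0.8 * 2 = 1.6 (scaled to ~2.1 when
-    # neuroticism > 0.7) before flooring at 0 and casting to int.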
-    # API-compatible properties
-    @property
-    def stage(self) -> EvolutionStage:
-        """Get current evolution stage"""
-        return self.lifecycle.stage
-    
-    @property
-    def level(self) -> int:
-        """Calculate level based on stats"""
-        total_stats = (
-            self.stats.offense + 
-            self.stats.defense + 
-            self.stats.speed + 
-            self.stats.brains
-        )
-        return min(99, total_stats // 40)
-    
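-    # e.g. offense=120, defense=110, speed=90, brains=100 totals 420,
-    # giving level = min(99, 420 // 40) = 10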
-    @property
-    def age(self) -> int:
-        """Get age in hours"""
-        return int(self.lifecycle.age_minutes / 60)
-    
-    # API-compatible methods
-    def get_stats(self) -> Dict[str, Any]:
-        """Get stats in API format"""
-        return {
-            "health": self.stats.health,
-            "happiness": self.stats.happiness,
-            "hunger": self.stats.hunger,
-            "energy": self.stats.energy,
-            "discipline": self.stats.discipline,
-            "cleanliness": self.stats.cleanliness
-        }
-    
-    def feed(self, food_type: str = "balanced") -> Dict[str, Any]:
-        """Feed the monster (hunger is satiation here: 100 = full, < 20 starving)"""
-        hunger_restored = {
-            "meat": 40,
-            "vegetable": 30,
-            "balanced": 35,
-            "treat": 20
-        }.get(food_type, 30)
-        
-        # Restore satiation; decay in apply_time_effects drains it back down
-        self.stats.hunger = min(100, self.stats.hunger + hunger_restored)
-        self.stats.happiness = min(100, self.stats.happiness + 5)
-        
-        if food_type == "treat":
-            self.stats.happiness = min(100, self.stats.happiness + 10)
-            self.stats.discipline = max(0, self.stats.discipline - 5)
-        
-        return {"message": f"Fed {self.name} with {food_type}"}
-    
-    def train(self, training_type: str = "strength") -> Dict[str, Any]:
-        """Train the monster"""
-        if self.stats.energy < 20:
-            return {"message": "Too tired to train"}
-        
-        stat_gains = {
-            "strength": {"offense": 5, "defense": 2},
-            "defense": {"defense": 5, "offense": 2},
-            "speed": {"speed": 5},
-            "intelligence": {"brains": 5}
-        }.get(training_type, {"offense": 3, "defense": 3})
-        
-        for stat, gain in stat_gains.items():
-            current = getattr(self.stats, stat, 0)
-            setattr(self.stats, stat, min(999, current + gain))
-        
-        self.stats.energy = max(0, self.stats.energy - 15)
-        self.stats.discipline = min(100, self.stats.discipline + 5)
-        
-        return {"message": f"Trained {training_type}"}
-    
-    def play(self) -> Dict[str, Any]:
-        """Play with the monster"""
-        self.stats.happiness = min(100, self.stats.happiness + 15)
-        self.stats.energy = max(0, self.stats.energy - 10)
-        self.stats.discipline = max(0, self.stats.discipline - 2)
-        return {"message": f"Played with {self.name}"}
-    
-    def clean(self) -> Dict[str, Any]:
-        """Clean the monster"""
-        self.stats.cleanliness = 100
-        self.stats.happiness = min(100, self.stats.happiness + 5)
-        return {"message": f"Cleaned {self.name}"}
-    
-    def heal(self) -> Dict[str, Any]:
-        """Heal the monster"""
-        self.stats.health = 100
-        self.stats.happiness = max(0, self.stats.happiness - 10)
-        return {"message": f"Healed {self.name}"}
-    
-    def discipline(self) -> Dict[str, Any]:
-        """Discipline the monster"""
-        self.stats.discipline = min(100, self.stats.discipline + 20)
-        self.stats.happiness = max(0, self.stats.happiness - 15)
-        return {"message": f"Disciplined {self.name}"}
-    
-    def rest(self) -> Dict[str, Any]:
-        """Let the monster rest"""
-        self.stats.energy = min(100, self.stats.energy + 30)
-        self.stats.happiness = min(100, self.stats.happiness + 5)
-        return {"message": f"{self.name} is resting"}
-    
-    def update_time_based_stats(self):
-        """Update stats based on time elapsed"""
-        now = datetime.now()
-        minutes_elapsed = (now - self.last_update).total_seconds() / 60
-        self.apply_time_effects(minutes_elapsed)
\ No newline at end of file
diff --git a/src/core/monster_engine_dw1.py b/src/core/monster_engine_dw1.py
deleted file mode 100644
index f01e03004d1ec10a5d04bbea66036f110253f8cc..0000000000000000000000000000000000000000
--- a/src/core/monster_engine_dw1.py
+++ /dev/null
@@ -1,919 +0,0 @@
-"""
-Enhanced Monster Engine with DW1-Accurate Mechanics
-Based on SydMontague and Vicen04's reverse engineering research
-"""
-
-import asyncio
-import json
-import random
-import time
-from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-from enum import Enum, auto
-from typing import Dict, List, Optional, Tuple, Any
-import numpy as np
-
-# DW1-Accurate Constants based on reverse engineering
-DW1_CONSTANTS = {
-    "LIFESPAN_DAYS": {
-        "BABY": 1,
-        "CHILD": 3,
-        "ADULT": 5,
-        "PERFECT": 6,
-        "ULTIMATE": 7,
-        "MEGA": 8
-    },
-    "STAT_CAPS": {
-        "HP": 9999,
-        "MP": 9999,
-        "OFFENSE": 999,
-        "DEFENSE": 999,
-        "SPEED": 999,
-        "BRAINS": 999,
-        "WEIGHT": {"MIN": 5, "MAX": 99}
-    },
-    "CARE_MISTAKE_THRESHOLDS": {
-        "PERFECT_CARE": 0,
-        "GOOD_CARE": 3,
-        "NORMAL_CARE": 5,
-        "POOR_CARE": 10
-    },
-    "TRAINING_GAIN_FORMULA": {
-        # Based on Vicen04's findings: 1% of best enemy stat × number of enemies
-        "BASE_MULTIPLIER": 0.01,
-        "PERFECT_BONUS": 1.5,
-        "GREAT_BONUS": 1.2,
-        "GOOD_BONUS": 1.0,
-        "MISS_PENALTY": 0.5
-    },
-    "EVOLUTION_HOURS": {
-        "EGG_TO_BABY": 1,
-        "BABY_TO_CHILD": 6,
-        "CHILD_TO_ADULT": 24,
-        "ADULT_TO_PERFECT": 72,
-        "PERFECT_TO_ULTIMATE": 96,
-        "ULTIMATE_TO_MEGA": 144
-    },
-    "TOILET_TIMER": 180,  # 3 hours in minutes
-    "SLEEP_SCHEDULE": {
-        "BEDTIME": 20,  # 8 PM
-        "WAKETIME": 8   # 8 AM
-    },
-    "SICKNESS_CHANCE": 0.02,  # 2% per hour when conditions are poor
-    "DEATH_CHANCE_PER_DAY_OVER_LIFESPAN": 0.20  # 20% per day over natural lifespan
-}
-
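-# Illustrative sketch of the victory-gain rule quoted in TRAINING_GAIN_FORMULA
-# ("1% of best enemy stat × number of enemies"); the stat values below are
-# hypothetical and this helper is not part of the original engine.
-def _sketch_victory_gain(enemy_best_stats: List[int]) -> int:
-    multiplier = DW1_CONSTANTS["TRAINING_GAIN_FORMULA"]["BASE_MULTIPLIER"]
-    # One best stat per defeated enemy, each contributing 1%
-    return max(1, int(sum(stat * multiplier for stat in enemy_best_stats)))
-
-# _sketch_victory_gain([300, 250]) -> int(3.0 + 2.5) = 5
-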
-class SpeciesType(Enum):
-    """DW1-accurate species types"""
-    DATA = "Data"
-    VACCINE = "Vaccine"
-    VIRUS = "Virus"
-    FREE = "Free"
-
-class Personality(Enum):
-    """DW1-inspired personality types affecting behavior"""
-    BRAVE = "Brave"      # +Offense, -Defense
-    CALM = "Calm"        # +Defense, -Speed
-    ENERGETIC = "Energetic"  # +Speed, -MP
-    CLEVER = "Clever"    # +Brains, +MP, -HP
-    FRIENDLY = "Friendly"  # Balanced, easier to care for
-
-class Stage(Enum):
-    """Evolution stages with DW1 naming"""
-    EGG = "Egg"
-    BABY = "Baby"
-    CHILD = "Child"
-    ADULT = "Adult"
-    PERFECT = "Perfect"
-    ULTIMATE = "Ultimate"
-    MEGA = "Mega"
-
-class TrainingType(Enum):
-    """DW1 training types from various gym locations"""
-    HP = "Green Gym"      # HP training
-    MP = "Beetle Land"    # MP/Brains training
-    OFFENSE = "Dojo"      # Offense training
-    DEFENSE = "Ice Gym"   # Defense training
-    SPEED = "Speed Gym"   # Speed training
-    BRAINS = "Library"    # Brains training
-
-class Sickness(Enum):
-    """DW1 sickness types"""
-    NONE = "Healthy"
-    COLD = "Cold"         # From low temperature or rain
-    INJURY = "Injury"     # From battles or training
-    FATIGUE = "Fatigue"   # From overwork
-    STOMACH = "Stomach"   # From bad food or overeating
-
-class Technique:
-    """DW1-style battle technique"""
-    def __init__(self, name: str, power: int, mp_cost: int, accuracy: int, 
-                 tech_type: str, range_type: str = "Close"):
-        self.name = name
-        self.power = power
-        self.mp_cost = mp_cost
-        self.accuracy = accuracy
-        self.tech_type = tech_type  # Fire, Ice, Electric, etc.
-        self.range_type = range_type  # Close, Mid, Far
-
-@dataclass
-class BattleStats:
-    """DW1-accurate battle statistics"""
-    hp: int = 100
-    max_hp: int = 100
-    mp: int = 20
-    max_mp: int = 20
-    offense: int = 10
-    defense: int = 10
-    speed: int = 10
-    brains: int = 10
-    
-    # DW1-specific stats
-    weight: int = 10  # Affects speed and some evolutions
-    nature: int = 50  # 0-100, affects which techniques can be learned
-    
-    # Battle-specific
-    techniques: List[Technique] = field(default_factory=list)
-    battles_won: int = 0
-    battles_lost: int = 0
-    
-    def calculate_damage(self, technique: Technique, target_defense: int) -> int:
-        """DW1 damage calculation formula"""
-        base_damage = (self.offense * technique.power / 100) - (target_defense / 2)
-        # Add randomness like DW1
-        variance = random.uniform(0.85, 1.15)
-        return max(1, int(base_damage * variance))
-
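-# Usage sketch for calculate_damage (hypothetical numbers): offense 120 with a
-# 30-power technique against defense 40 gives 120 * 30 / 100 - 40 / 2 = 16,
-# then ±15% variance with a floor of 1:
-#
-#     attacker = BattleStats(offense=120)
-#     tech = Technique("Pepper Breath", 30, 4, 90, "Fire")
-#     dmg = attacker.calculate_damage(tech, target_defense=40)  # ~13..18
-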
-@dataclass
-class CareStats:
-    """DW1-accurate care statistics"""
-    happiness: int = 50  # 0-100
-    discipline: int = 50  # 0-100
-    
-    # DW1-specific needs
-    hunger: int = 50     # 0-100, rises fastest of all needs; higher = hungrier
-    energy: int = 100    # 0-100, depletes with activity
-    toilet: int = 0      # 0-100, fills up over time
-    
-    # Care quality tracking
-    care_mistakes: float = 0.0  # accumulates fractionally (see _update_needs)
-    perfect_care_hours: int = 0
-    training_count: int = 0
-    
-    # Sickness and status
-    sickness: Sickness = Sickness.NONE
-    is_sleeping: bool = False
-    last_fed: float = field(default_factory=time.time)
-    last_toilet: float = field(default_factory=time.time)
-    
-    def needs_toilet(self) -> bool:
-        """Check if monster needs bathroom based on DW1 mechanics"""
-        return self.toilet >= 80
-
-@dataclass
-class EvolutionData:
-    """DW1-accurate evolution tracking"""
-    current_stage: Stage = Stage.EGG
-    age_hours: float = 0.0
-    birth_time: float = field(default_factory=time.time)
-    evolution_time: Optional[float] = None
-    
-    # Evolution requirements tracking
-    stat_requirements_met: Dict[str, bool] = field(default_factory=dict)
-    weight_requirement_met: bool = False
-    care_requirement_met: bool = False
-    battle_requirement_met: bool = False
-    
-    # Special evolution flags
-    has_perfect_care_week: bool = False
-    has_tournament_win: bool = False
-    virus_busters_unlocked: bool = False
-    
-    def get_lifespan_remaining(self) -> float:
-        """Calculate remaining lifespan in hours"""
-        # Uppercase to match LIFESPAN_DAYS keys ("BABY", "CHILD", ...)
-        max_days = DW1_CONSTANTS["LIFESPAN_DAYS"].get(self.current_stage.value.upper(), 5)
-        max_hours = max_days * 24
-        return max(0, max_hours - self.age_hours)
-
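-# Example: an ADULT at age_hours=100 has a cap of 5 * 24 = 120 hours, so
-# get_lifespan_remaining() returns 20.0.
-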
-class DW1Monster:
-    """Complete DW1-accurate monster implementation"""
-    
-    def __init__(self, name: str, species_type: SpeciesType = SpeciesType.DATA,
-                 personality: Optional[Personality] = None):
-        self.id = str(time.time())
-        self.name = name
-        self.species_type = species_type
-        self.personality = personality or random.choice(list(Personality))
-        
-        # Initialize all stats systems
-        self.battle_stats = BattleStats()
-        self.care_stats = CareStats()
-        self.evolution = EvolutionData()
-        
-        # DW1-specific data
-        self.location = "File City"  # Current location in the world
-        self.recruited_digimon: List[str] = []  # For city building
-        self.items_inventory: Dict[str, int] = {}  # Item storage
-        
-        # Apply personality modifiers
-        self._apply_personality_modifiers()
-        
-        # Initialize with baby stats if not egg
-        if self.evolution.current_stage != Stage.EGG:
-            self._initialize_baby_stats()
-    
-    def _apply_personality_modifiers(self):
-        """Apply DW1-style personality stat modifiers"""
-        modifiers = {
-            Personality.BRAVE: {"offense": 1.2, "defense": 0.8},
-            Personality.CALM: {"defense": 1.2, "speed": 0.8},
-            Personality.ENERGETIC: {"speed": 1.2, "max_mp": 0.8},
-            Personality.CLEVER: {"brains": 1.2, "max_mp": 1.2, "max_hp": 0.8},
-            Personality.FRIENDLY: {}  # No modifiers, easier care
-        }
-        
-        mods = modifiers.get(self.personality, {})
-        for stat, multiplier in mods.items():
-            if hasattr(self.battle_stats, stat):
-                current = getattr(self.battle_stats, stat)
-                setattr(self.battle_stats, stat, int(current * multiplier))
-    
-    def _initialize_baby_stats(self):
-        """Initialize baby form with DW1-appropriate stats"""
-        self.battle_stats.max_hp = random.randint(50, 80)
-        self.battle_stats.hp = self.battle_stats.max_hp
-        self.battle_stats.max_mp = random.randint(10, 20)
-        self.battle_stats.mp = self.battle_stats.max_mp
-        self.battle_stats.offense = random.randint(5, 15)
-        self.battle_stats.defense = random.randint(5, 15)
-        self.battle_stats.speed = random.randint(5, 15)
-        self.battle_stats.brains = random.randint(5, 15)
-        self.battle_stats.weight = random.randint(5, 15)
-    
-    def update(self, delta_time: float):
-        """Update monster state based on DW1 mechanics"""
-        # Update age
-        self.evolution.age_hours += delta_time / 3600.0
-        
-        # Check for death (DW1 mortality system)
-        if self._check_death():
-            return False  # Monster died
-        
-        # Update needs
-        self._update_needs(delta_time)
-        
-        # Check for sickness
-        self._check_sickness()
-        
-        # Update sleep state
-        self._update_sleep_state()
-        
-        # Check evolution
-        self._check_evolution()
-        
-        return True  # Monster still alive
-    
-    def _check_death(self) -> bool:
-        """DW1 death mechanics based on age and care"""
-        lifespan_remaining = self.evolution.get_lifespan_remaining()
-        
-        if lifespan_remaining <= 0:
-            # Past natural lifespan, increasing chance of death
-            days_over = abs(lifespan_remaining) / 24
-            death_chance = DW1_CONSTANTS["DEATH_CHANCE_PER_DAY_OVER_LIFESPAN"] * days_over
-            
-            # Good care reduces death chance
-            if self.care_stats.care_mistakes < 3:
-                death_chance *= 0.5
-            
-            if random.random() < death_chance / (24 * 60):  # Per minute chance
-                return True
-        
-        # Can also die from severe neglect
-        if (self.care_stats.happiness < 10 and 
-            self.care_stats.hunger > 90 and
-            self.care_stats.energy < 10):
-            if random.random() < 0.1:  # 10% chance when severely neglected
-                return True
-        
-        return False
-    
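-    # Example: 2 days past the natural lifespan gives death_chance
-    # 0.20 * 2 = 0.40 per day, halved to 0.20 with fewer than 3 care
-    # mistakes, then spread across per-minute checks as 0.20 / 1440.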
-    def _update_needs(self, delta_time: float):
-        """Update care needs based on DW1 degradation rates (delta_time in seconds)"""
-        # update() passes elapsed seconds (age_hours += delta_time / 3600), so
-        # convert once; all rates below are per minute
-        minutes = delta_time / 60.0
-        if not self.care_stats.is_sleeping:
-            # Hunger builds faster during activity
-            self.care_stats.hunger = min(100, self.care_stats.hunger + minutes * 0.5)
-            # Energy depletes based on activity
-            self.care_stats.energy = max(0, self.care_stats.energy - minutes * 0.3)
-            # Happiness slowly decreases without interaction
-            self.care_stats.happiness = max(0, self.care_stats.happiness - minutes * 0.1)
-        else:
-            # Sleeping restores energy
-            self.care_stats.energy = min(100, self.care_stats.energy + minutes * 1.0)
-        
-        # Toilet need builds once the timer since the last visit has elapsed
-        time_since_toilet = time.time() - self.care_stats.last_toilet
-        if time_since_toilet > DW1_CONSTANTS["TOILET_TIMER"] * 60:
-            self.care_stats.toilet = min(100, self.care_stats.toilet + minutes * 0.8)
-        
-        # Track care mistakes while needs stay in the red
-        if self.care_stats.hunger > 80:
-            self.care_stats.care_mistakes += minutes * 0.01
-        if self.care_stats.toilet > 90:
-            self.care_stats.care_mistakes += minutes * 0.02
-        if self.care_stats.happiness < 20:
-            self.care_stats.care_mistakes += minutes * 0.01
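-    # Worked example with the per-minute rates above: 10 awake minutes add
-    # 10 * 0.5 = 5 hunger, drain 10 * 0.3 = 3 energy and 10 * 0.1 = 1
-    # happiness; 10 sleeping minutes restore 10 energy instead.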
-    
-    def _check_sickness(self):
-        """DW1 sickness system based on care quality"""
-        if self.care_stats.sickness != Sickness.NONE:
-            return  # Already sick
-        
-        # Calculate sickness chance based on conditions
-        sickness_chance = 0.0
-        
-        # Poor care increases sickness chance
-        if self.care_stats.happiness < 30:
-            sickness_chance += DW1_CONSTANTS["SICKNESS_CHANCE"]
-        if self.care_stats.energy < 20:
-            sickness_chance += DW1_CONSTANTS["SICKNESS_CHANCE"]
-        if self.care_stats.toilet > 90:
-            sickness_chance += DW1_CONSTANTS["SICKNESS_CHANCE"] * 2
-        
-        # Overtraining can cause injury
-        if self.care_stats.training_count > 10:
-            sickness_chance += DW1_CONSTANTS["SICKNESS_CHANCE"]
-            
-        if random.random() < sickness_chance:
-            # Determine sickness type based on cause
-            if self.care_stats.toilet > 90:
-                self.care_stats.sickness = Sickness.STOMACH
-            elif self.care_stats.energy < 20:
-                self.care_stats.sickness = Sickness.FATIGUE
-            elif self.care_stats.training_count > 10:
-                self.care_stats.sickness = Sickness.INJURY
-            else:
-                self.care_stats.sickness = Sickness.COLD
-    
-    def _update_sleep_state(self):
-        """DW1 sleep schedule system"""
-        current_hour = datetime.now().hour
-        bedtime = DW1_CONSTANTS["SLEEP_SCHEDULE"]["BEDTIME"]
-        waketime = DW1_CONSTANTS["SLEEP_SCHEDULE"]["WAKETIME"]
-        
-        # Check if it's sleep time
-        if bedtime <= current_hour or current_hour < waketime:
-            if not self.care_stats.is_sleeping:
-                self.care_stats.is_sleeping = True
-                # Can't sleep if needs are urgent
-                if self.care_stats.hunger > 90 or self.care_stats.toilet > 90:
-                    self.care_stats.care_mistakes += 1
-        else:
-            self.care_stats.is_sleeping = False
-    
-    def _check_evolution(self):
-        """DW1-accurate evolution checking system"""
-        # Can't evolve if sick or sleeping
-        if self.care_stats.sickness != Sickness.NONE or self.care_stats.is_sleeping:
-            return
-        
-        # Check age requirements
-        # Uppercase the key so it matches EVOLUTION_HOURS entries like "BABY_TO_CHILD"
-        hours_required = DW1_CONSTANTS["EVOLUTION_HOURS"].get(
-            f"{self.evolution.current_stage.value.upper()}_TO_{self._get_next_stage().upper()}",
-            float('inf')
-        )
-        
-        if self.evolution.age_hours < hours_required:
-            return
-        
-        # Check evolution requirements based on current stage
-        can_evolve = False
-        next_form = None
-        
-        if self.evolution.current_stage == Stage.BABY:
-            # Baby to Child is mostly automatic with minimal requirements
-            can_evolve = True
-            next_form = self._determine_child_evolution()
-        elif self.evolution.current_stage == Stage.CHILD:
-            # Child to Adult requires meeting stat thresholds
-            can_evolve, next_form = self._check_adult_evolution()
-        elif self.evolution.current_stage == Stage.ADULT:
-            # Adult to Perfect requires excellent care and high stats
-            can_evolve, next_form = self._check_perfect_evolution()
-        elif self.evolution.current_stage == Stage.PERFECT:
-            # Perfect to Ultimate is very demanding
-            can_evolve, next_form = self._check_ultimate_evolution()
-        elif self.evolution.current_stage == Stage.ULTIMATE:
-            # Ultimate to Mega requires special conditions
-            can_evolve, next_form = self._check_mega_evolution()
-        
-        if can_evolve and next_form:
-            self._evolve_to(next_form)
-    
-    def _get_next_stage(self) -> str:
-        """Get the next evolution stage"""
-        stage_order = [Stage.EGG, Stage.BABY, Stage.CHILD, Stage.ADULT, 
-                       Stage.PERFECT, Stage.ULTIMATE, Stage.MEGA]
-        current_index = stage_order.index(self.evolution.current_stage)
-        if current_index < len(stage_order) - 1:
-            return stage_order[current_index + 1].value
-        return self.evolution.current_stage.value
-    
-    def _determine_child_evolution(self) -> str:
-        """Determine which Child form to evolve into based on DW1 logic"""
-        # Based on initial care and personality
-        if self.care_stats.care_mistakes < 3:
-            if self.personality == Personality.BRAVE:
-                return "Agumon"
-            elif self.personality == Personality.CALM:
-                return "Gabumon"
-            elif self.personality == Personality.ENERGETIC:
-                return "Patamon"
-            elif self.personality == Personality.CLEVER:
-                return "Tentomon"
-            else:
-                return "Elecmon"
-        else:
-            # Poor care leads to weaker forms
-            return random.choice(["Kunemon", "Palmon", "Betamon"])
-    
-    def _check_adult_evolution(self) -> Tuple[bool, Optional[str]]:
-        """Check requirements for Adult evolution"""
-        # Stat requirements vary by target form
-        stat_total = (self.battle_stats.offense + self.battle_stats.defense + 
-                      self.battle_stats.speed + self.battle_stats.brains)
-        
-        # Weight requirements (some forms need specific weight ranges)
-        weight_ok = 15 <= self.battle_stats.weight <= 35
-        
-        # Care requirements
-        care_ok = self.care_stats.care_mistakes < DW1_CONSTANTS["CARE_MISTAKE_THRESHOLDS"]["NORMAL_CARE"]
-        
-        if stat_total > 200 and weight_ok and care_ok:
-            # Determine form based on highest stats
-            highest_stat = max(
-                ("offense", self.battle_stats.offense),
-                ("defense", self.battle_stats.defense),
-                ("speed", self.battle_stats.speed),
-                ("brains", self.battle_stats.brains),
-                key=lambda x: x[1]
-            )[0]
-            
-            forms = {
-                "offense": "Greymon" if self.species_type == SpeciesType.VACCINE else "Devimon",
-                "defense": "Monochromon" if self.species_type == SpeciesType.DATA else "Garurumon", 
-                "speed": "Birdramon" if self.species_type == SpeciesType.VACCINE else "Kabuterimon",
-                "brains": "Centarumon" if self.species_type == SpeciesType.DATA else "Bakemon"
-            }
-            
-            return True, forms.get(highest_stat, "Numemon")
-        
-        # Failed requirements lead to Numemon (like DW1)
-        return True, "Numemon"
-    
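-    # Example (hypothetical stats): offense 80, defense 50, speed 40, brains 45
-    # gives stat_total 215; with weight 20 and 2 care mistakes every check
-    # passes, "offense" is the highest stat, and a VACCINE type becomes
-    # Greymon. A stat_total of 180 would fall through to Numemon instead.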
-    def _check_perfect_evolution(self) -> Tuple[bool, Optional[str]]:
-        """Check requirements for Perfect evolution"""
-        # Much stricter requirements
-        stat_total = (self.battle_stats.offense + self.battle_stats.defense + 
-                      self.battle_stats.speed + self.battle_stats.brains)
-        
-        # Need excellent care
-        care_excellent = self.care_stats.care_mistakes < DW1_CONSTANTS["CARE_MISTAKE_THRESHOLDS"]["GOOD_CARE"]
-        
-        # Need battle experience
-        battles_ok = self.battle_stats.battles_won >= 15
-        
-        # Need high stats
-        stats_ok = stat_total > 500
-        
-        if care_excellent and battles_ok and stats_ok:
-            # Determine based on the concrete form recorded by _evolve_to
-            # (str(self) never contains a form name, so match the tracked attribute)
-            current_form = getattr(self, "current_form", "")
-            if "Greymon" in current_form:
-                return True, "MetalGreymon"
-            elif "Garurumon" in current_form:
-                return True, "WereGarurumon"
-            # Add more evolution paths
-            
-        return False, None
-    
-    def _check_ultimate_evolution(self) -> Tuple[bool, Optional[str]]:
-        """Check requirements for Ultimate evolution"""
-        # Even stricter requirements
-        # Perfect care week required
-        perfect_week = self.evolution.has_perfect_care_week
-        
-        # Very high stats
-        stat_total = (self.battle_stats.offense + self.battle_stats.defense + 
-                      self.battle_stats.speed + self.battle_stats.brains)
-        stats_ok = stat_total > 800
-        
-        # Lots of battles
-        battles_ok = self.battle_stats.battles_won >= 30
-        
-        if perfect_week and stats_ok and battles_ok:
-            # Special evolutions based on conditions
-            if self.evolution.has_tournament_win:
-                return True, "WarGreymon"
-            # Add more paths
-            
-        return False, None
-    
-    def _check_mega_evolution(self) -> Tuple[bool, Optional[str]]:
-        """Check requirements for Mega evolution (beyond original DW1)"""
-        # Requires special items or conditions
-        # This is an extension beyond DW1
-        return False, None
-    
-    def _evolve_to(self, new_form: str):
-        """Execute evolution to new form"""
-        # Update stage and remember the concrete form for later evolution checks
-        old_stage = self.evolution.current_stage
-        self.current_form = new_form
-        self.evolution.current_stage = self._get_stage_from_form(new_form)
-        self.evolution.evolution_time = time.time()
-        
-        # Boost stats based on evolution
-        stat_boost = {
-            Stage.CHILD: 1.5,
-            Stage.ADULT: 2.0,
-            Stage.PERFECT: 1.8,
-            Stage.ULTIMATE: 1.5,
-            Stage.MEGA: 1.3
-        }.get(self.evolution.current_stage, 1.0)
-        
-        # Apply stat boosts
-        self.battle_stats.max_hp = int(self.battle_stats.max_hp * stat_boost)
-        self.battle_stats.hp = self.battle_stats.max_hp
-        self.battle_stats.max_mp = int(self.battle_stats.max_mp * stat_boost)
-        self.battle_stats.mp = self.battle_stats.max_mp
-        self.battle_stats.offense = int(self.battle_stats.offense * stat_boost)
-        self.battle_stats.defense = int(self.battle_stats.defense * stat_boost)
-        self.battle_stats.speed = int(self.battle_stats.speed * stat_boost)
-        self.battle_stats.brains = int(self.battle_stats.brains * stat_boost)
-        
-        # Learn new techniques based on form
-        self._learn_evolution_techniques(new_form)
-    
-    def _get_stage_from_form(self, form: str) -> Stage:
-        """Map form name to evolution stage"""
-        # This would have a complete mapping of all forms
-        baby_forms = ["Botamon", "Punimon", "Poyomon", "Yuramon"]
-        child_forms = ["Agumon", "Gabumon", "Patamon", "Tentomon", "Elecmon", 
-                       "Kunemon", "Palmon", "Betamon"]
-        adult_forms = ["Greymon", "Garurumon", "Angemon", "Kabuterimon", 
-                       "Leomon", "Devimon", "Numemon"]
-        perfect_forms = ["MetalGreymon", "WereGarurumon", "MagnaAngemon", 
-                         "Andromon", "Giromon"]
-        ultimate_forms = ["WarGreymon", "MetalGarurumon", "Seraphimon", 
-                          "Machinedramon"]
-        mega_forms = ["Omnimon", "Imperialdramon", "Alphamon"]
-        
-        if form in baby_forms:
-            return Stage.BABY
-        elif form in child_forms:
-            return Stage.CHILD
-        elif form in adult_forms:
-            return Stage.ADULT
-        elif form in perfect_forms:
-            return Stage.PERFECT
-        elif form in ultimate_forms:
-            return Stage.ULTIMATE
-        elif form in mega_forms:
-            return Stage.MEGA
-        
-        return self.evolution.current_stage
-    
-    def _learn_evolution_techniques(self, new_form: str):
-        """Learn techniques based on new form"""
-        # Each form has signature techniques
-        technique_db = {
-            "Agumon": [Technique("Pepper Breath", 30, 4, 90, "Fire")],
-            "Greymon": [Technique("Nova Blast", 80, 12, 85, "Fire", "Mid")],
-            "MetalGreymon": [Technique("Giga Destroyer", 150, 25, 80, "Fire", "Far")],
-            # Add complete technique database
-        }
-        
-        if new_form in technique_db:
-            for tech in technique_db[new_form]:
-                if tech not in self.battle_stats.techniques:
-                    self.battle_stats.techniques.append(tech)
-    
-    # Care Actions
-    def feed(self, food_type: str = "meat") -> Dict[str, Any]:
-        """DW1-accurate feeding system"""
-        if self.care_stats.is_sleeping:
-            return {"success": False, "message": "Can't feed while sleeping"}
-        
-        # Food effects based on DW1
-        food_effects = {
-            "meat": {"hunger": -20, "weight": 1, "happiness": 5},
-            "giant_meat": {"hunger": -50, "weight": 5, "happiness": 10},
-            "sirloin": {"hunger": -30, "weight": 2, "hp": 100},
-            "fish": {"hunger": -25, "weight": 0, "happiness": 5},
-            "digimushroom": {"hunger": -15, "weight": -2, "mp": 50},
-            "happymushroom": {"hunger": -10, "happiness": 20, "weight": -1}
-        }
-        
-        effects = food_effects.get(food_type, food_effects["meat"])
-        
-        # Apply effects
-        self.care_stats.hunger = max(0, self.care_stats.hunger + effects.get("hunger", 0))
-        self.battle_stats.weight = max(5, min(99, self.battle_stats.weight + effects.get("weight", 0)))
-        self.care_stats.happiness = min(100, self.care_stats.happiness + effects.get("happiness", 0))
-        
-        if "hp" in effects:
-            self.battle_stats.hp = min(self.battle_stats.max_hp, 
-                                       self.battle_stats.hp + effects["hp"])
-        if "mp" in effects:
-            self.battle_stats.mp = min(self.battle_stats.max_mp, 
-                                       self.battle_stats.mp + effects["mp"])
-        
-        self.care_stats.last_fed = time.time()
-        
-        # Check for overeating
-        if self.care_stats.hunger < 20 and effects.get("hunger", 0) < -20:
-            self.care_stats.care_mistakes += 0.5
-            return {"success": True, "message": "Overfed! Be careful..."}
-        
-        return {"success": True, "message": f"Fed {food_type}"}
-    
-    def toilet(self) -> Dict[str, Any]:
-        """DW1 toilet system"""
-        if self.care_stats.toilet < 50:
-            return {"success": False, "message": "Doesn't need to go"}
-        
-        # Remember how full the meter was before resetting it; the original
-        # check ran after the reset and so could never fire
-        was_urgent = self.care_stats.toilet > 80
-        self.care_stats.toilet = 0
-        self.care_stats.last_toilet = time.time()
-        self.care_stats.happiness = min(100, self.care_stats.happiness + 5)
-        
-        # A timely toilet visit claws back part of a pending care mistake
-        if was_urgent:
-            self.care_stats.care_mistakes = max(0, self.care_stats.care_mistakes - 0.5)
-        
-        return {"success": True, "message": "Toilet successful"}
-    
-    def train(self, training_type: TrainingType) -> Dict[str, Any]:
-        """DW1-accurate training system with mini-game results"""
-        if self.care_stats.is_sleeping:
-            return {"success": False, "message": "Can't train while sleeping"}
-        
-        if self.care_stats.energy < 20:
-            return {"success": False, "message": "Too tired to train"}
-        
-        if self.care_stats.sickness != Sickness.NONE:
-            return {"success": False, "message": "Can't train while sick"}
-        
-        # Simulate training mini-game result
-        performance = self._simulate_training_performance()
-        
-        # Calculate stat gains based on performance
-        base_gain = 10  # Base training gain
-        
-        if performance == "PERFECT":
-            multiplier = DW1_CONSTANTS["TRAINING_GAIN_FORMULA"]["PERFECT_BONUS"]
-        elif performance == "GREAT":
-            multiplier = DW1_CONSTANTS["TRAINING_GAIN_FORMULA"]["GREAT_BONUS"]
-        elif performance == "GOOD":
-            multiplier = DW1_CONSTANTS["TRAINING_GAIN_FORMULA"]["GOOD_BONUS"]
-        else:  # MISS
-            multiplier = DW1_CONSTANTS["TRAINING_GAIN_FORMULA"]["MISS_PENALTY"]
-            self.care_stats.care_mistakes += 0.5
-        
-        stat_gain = int(base_gain * multiplier)
-        
-        # Apply gains based on training type
-        stat_map = {
-            TrainingType.HP: "max_hp",
-            TrainingType.MP: "max_mp",
-            TrainingType.OFFENSE: "offense",
-            TrainingType.DEFENSE: "defense",
-            TrainingType.SPEED: "speed",
-            TrainingType.BRAINS: "brains"
-        }
-        
-        trained_stat = stat_map.get(training_type)
-        if trained_stat:
-            current = getattr(self.battle_stats, trained_stat)
-            # Strip the "max_" prefix so "max_hp" resolves to the "HP" cap (9999)
-            cap = DW1_CONSTANTS["STAT_CAPS"].get(trained_stat.upper().replace("MAX_", ""), 999)
-            new_value = min(cap, current + stat_gain)
-            setattr(self.battle_stats, trained_stat, new_value)
-            
-            # Update current HP/MP if max increased
-            if trained_stat == "max_hp":
-                self.battle_stats.hp = min(self.battle_stats.hp + stat_gain, 
-                                           self.battle_stats.max_hp)
-            elif trained_stat == "max_mp":
-                self.battle_stats.mp = min(self.battle_stats.mp + stat_gain, 
-                                           self.battle_stats.max_mp)
-        
-        # Training costs energy
-        self.care_stats.energy -= 15
-        self.care_stats.training_count += 1
-        
-        # Small happiness boost for good performance
-        if performance in ["PERFECT", "GREAT"]:
-            self.care_stats.happiness = min(100, self.care_stats.happiness + 5)
-        
-        return {
-            "success": True,
-            "performance": performance,
-            "stat_gain": stat_gain,
-            "message": f"{performance}! Gained {stat_gain} {trained_stat}"
-        }
-    
-    def _simulate_training_performance(self) -> str:
-        """Simulate training mini-game performance"""
-        # Performance affected by happiness and discipline
-        base_chance = 0.5
-        if self.care_stats.happiness > 70:
-            base_chance += 0.2
-        if self.care_stats.discipline > 70:
-            base_chance += 0.2
-        
-        roll = random.random()
-        if roll < base_chance * 0.2:
-            return "PERFECT"
-        elif roll < base_chance * 0.5:
-            return "GREAT"
-        elif roll < base_chance:
-            return "GOOD"
-        else:
-            return "MISS"
-    
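-    # With happiness and discipline both above 70, base_chance = 0.9, so the
-    # roll bands work out to PERFECT < 0.18, GREAT < 0.45, GOOD < 0.9, else
-    # MISS: roughly 18% / 27% / 45% / 10%.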
-    def heal(self, item: str = "medicine") -> Dict[str, Any]:
-        """Heal sickness using DW1 items"""
-        if self.care_stats.sickness == Sickness.NONE:
-            return {"success": False, "message": "Not sick"}
-        
-        # Different items cure different ailments
-        cure_map = {
-            "medicine": [Sickness.COLD, Sickness.STOMACH],
-            "bandage": [Sickness.INJURY],
-            "restore": [Sickness.FATIGUE],
-            "cure_all": [Sickness.COLD, Sickness.STOMACH, Sickness.INJURY, Sickness.FATIGUE]
-        }
-        
-        if self.care_stats.sickness in cure_map.get(item, []):
-            self.care_stats.sickness = Sickness.NONE
-            self.care_stats.happiness = min(100, self.care_stats.happiness + 10)
-            return {"success": True, "message": f"Cured with {item}"}
-        
-        return {"success": False, "message": f"{item} doesn't cure {self.care_stats.sickness.value}"}
-    
-    def battle(self, opponent: 'DW1Monster') -> Dict[str, Any]:
-        """Execute DW1-style turn-based battle"""
-        battle_log = []
-        turn = 0
-        
-        while self.battle_stats.hp > 0 and opponent.battle_stats.hp > 0:
-            turn += 1
-            
-            # Speed determines turn order
-            if self.battle_stats.speed >= opponent.battle_stats.speed:
-                first, second = self, opponent
-            else:
-                first, second = opponent, self
-            
-            # First attacker
-            damage = first._execute_attack(second)
-            battle_log.append(f"{first.name} deals {damage} damage")
-            second.battle_stats.hp = max(0, second.battle_stats.hp - damage)
-            
-            if second.battle_stats.hp <= 0:
-                break
-            
-            # Second attacker
-            damage = second._execute_attack(first)
-            battle_log.append(f"{second.name} deals {damage} damage")
-            first.battle_stats.hp = max(0, first.battle_stats.hp - damage)
-            
-            # Limit battle length
-            if turn > 50:
-                break
-        
-        # Determine winner (a draw at the turn cap counts as a loss for self)
-        if self.battle_stats.hp > opponent.battle_stats.hp:
-            self.battle_stats.battles_won += 1
-            # Stat gains from victory (1% of enemy's best stat)
-            best_stat = max(opponent.battle_stats.offense, opponent.battle_stats.defense,
-                           opponent.battle_stats.speed, opponent.battle_stats.brains)
-            stat_gain = max(1, int(best_stat * 0.01))
-            
-            # Randomly boost a stat
-            stat_to_boost = random.choice(["offense", "defense", "speed", "brains"])
-            current = getattr(self.battle_stats, stat_to_boost)
-            setattr(self.battle_stats, stat_to_boost, current + stat_gain)
-            
-            return {
-                "success": True,
-                "winner": self.name,
-                "stat_gained": stat_to_boost,
-                "amount": stat_gain,
-                "log": battle_log
-            }
-        else:
-            self.battle_stats.battles_lost += 1
-            return {
-                "success": False,
-                "winner": opponent.name,
-                "log": battle_log
-            }
-    
-    def _execute_attack(self, target: 'DW1Monster') -> int:
-        """Execute attack with technique selection"""
-        # Simple AI: use random available technique
-        if self.battle_stats.techniques:
-            usable_techniques = [t for t in self.battle_stats.techniques 
-                               if t.mp_cost <= self.battle_stats.mp]
-            if usable_techniques:
-                technique = random.choice(usable_techniques)
-                self.battle_stats.mp -= technique.mp_cost
-                return self.battle_stats.calculate_damage(technique, target.battle_stats.defense)
-        
-        # Basic attack if no techniques
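-        # (offense minus half the target's defense, with +/-15% random
-        # variance and a floor of 1 damage)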
-        base_damage = self.battle_stats.offense - (target.battle_stats.defense / 2)
-        return max(1, int(base_damage * random.uniform(0.85, 1.15)))
-    
-    def to_dict(self) -> Dict[str, Any]:
-        """Serialize monster to dictionary"""
-        return {
-            "id": self.id,
-            "name": self.name,
-            "species_type": self.species_type.value,
-            "personality": self.personality.value,
-            "battle_stats": {
-                "hp": self.battle_stats.hp,
-                "max_hp": self.battle_stats.max_hp,
-                "mp": self.battle_stats.mp,
-                "max_mp": self.battle_stats.max_mp,
-                "offense": self.battle_stats.offense,
-                "defense": self.battle_stats.defense,
-                "speed": self.battle_stats.speed,
-                "brains": self.battle_stats.brains,
-                "weight": self.battle_stats.weight,
-                "nature": self.battle_stats.nature,
-                "battles_won": self.battle_stats.battles_won,
-                "battles_lost": self.battle_stats.battles_lost,
-                "techniques": [{"name": t.name, "power": t.power, "mp_cost": t.mp_cost} 
-                              for t in self.battle_stats.techniques]
-            },
-            "care_stats": {
-                "happiness": self.care_stats.happiness,
-                "discipline": self.care_stats.discipline,
-                "hunger": self.care_stats.hunger,
-                "energy": self.care_stats.energy,
-                "toilet": self.care_stats.toilet,
-                "care_mistakes": self.care_stats.care_mistakes,
-                "perfect_care_hours": self.care_stats.perfect_care_hours,
-                "training_count": self.care_stats.training_count,
-                "sickness": self.care_stats.sickness.value,
-                "is_sleeping": self.care_stats.is_sleeping
-            },
-            "evolution": {
-                "current_stage": self.evolution.current_stage.value,
-                "age_hours": self.evolution.age_hours,
-                "birth_time": self.evolution.birth_time,
-                "has_perfect_care_week": self.evolution.has_perfect_care_week,
-                "has_tournament_win": self.evolution.has_tournament_win
-            },
-            "location": self.location,
-            "recruited_digimon": self.recruited_digimon,
-            "items_inventory": self.items_inventory
-        }
-    
-    @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> 'DW1Monster':
-        """Deserialize monster from dictionary"""
-        monster = cls(
-            name=data["name"],
-            species_type=SpeciesType(data["species_type"]),
-            personality=Personality(data["personality"])
-        )
-        
-        # Restore identity and all stats
-        monster.id = data.get("id", monster.id)
-        for key, value in data["battle_stats"].items():
-            if key == "techniques":
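-                # accuracy and element type are not serialized, so restore
-                # defaults here (90% accuracy, "Normal" type)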
-                monster.battle_stats.techniques = [
-                    Technique(t["name"], t["power"], t["mp_cost"], 90, "Normal")
-                    for t in value
-                ]
-            elif hasattr(monster.battle_stats, key):
-                setattr(monster.battle_stats, key, value)
-        
-        for key, value in data["care_stats"].items():
-            if key == "sickness":
-                monster.care_stats.sickness = Sickness(value)
-            elif hasattr(monster.care_stats, key):
-                setattr(monster.care_stats, key, value)
-        
-        for key, value in data["evolution"].items():
-            if key == "current_stage":
-                monster.evolution.current_stage = Stage(value)
-            elif hasattr(monster.evolution, key):
-                setattr(monster.evolution, key, value)
-        
-        monster.location = data.get("location", "File City")
-        monster.recruited_digimon = data.get("recruited_digimon", [])
-        monster.items_inventory = data.get("items_inventory", {})
-        
-        return monster
\ No newline at end of file
diff --git a/src/deployment/__init__.py b/src/deployment/__init__.py
deleted file mode 100644
index 97db1bd5626565ad580410790c1163c72b9eeac9..0000000000000000000000000000000000000000
--- a/src/deployment/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Deployment module initialization
\ No newline at end of file
diff --git a/src/deployment/zero_gpu_optimizer.py b/src/deployment/zero_gpu_optimizer.py
deleted file mode 100644
index 3a02e239dc22d31135f10167824532c8097b9a3c..0000000000000000000000000000000000000000
--- a/src/deployment/zero_gpu_optimizer.py
+++ /dev/null
@@ -1,221 +0,0 @@
-import os
-import torch
-import psutil
-import logging
-from typing import Dict, Any, Optional
-import spaces
-from functools import wraps
-import asyncio
-import time
-
-def get_optimal_device():
-    """Get the optimal device for computation"""
-    if torch.cuda.is_available():
-        return torch.device("cuda")
-    elif torch.backends.mps.is_available():
-        return torch.device("mps")
-    else:
-        return torch.device("cpu")
-
-class ZeroGPUOptimizer:
-    def __init__(self):
-        self.logger = logging.getLogger(__name__)
-        self.is_zero_gpu_available = self._check_zero_gpu_availability()
-        self.resource_cache = {}
-        self.last_resource_check = 0
-        
-    def _check_zero_gpu_availability(self) -> bool:
-        """Check if Zero GPU is available"""
-        try:
-            import spaces
-            return hasattr(spaces, 'GPU')
-        except ImportError:
-            return False
-    
-    async def detect_available_resources(self) -> Dict[str, Any]:
-        """Detect available computational resources"""
-        current_time = time.time()
-        
-        # Cache resources for 60 seconds
-        if (current_time - self.last_resource_check) < 60 and self.resource_cache:
-            return self.resource_cache
-        
-        try:
-            # CPU Information
-            cpu_count = psutil.cpu_count(logical=True) or 2  # Fallback to 2 if None
-            cpu_freq = psutil.cpu_freq()
-            memory = psutil.virtual_memory()
-            
-            # Enhanced GPU Information
-            gpu_available = torch.cuda.is_available()
-            gpu_count = torch.cuda.device_count() if gpu_available else 0
-            gpu_memory_gb = 0
-            gpu_name = "None"
-            gpu_actually_accessible = False
-            
-            if gpu_available and gpu_count > 0:
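-        # Joint positions are fixed fractions of the mesh bounding box:
-        # spine segments at 30/50/70% of the height, neck and head near the
-        # top, and limbs mirrored left/right by fractions of the width.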
-                try:
-                    # Test if GPU is actually accessible
-                    torch.cuda.current_device()
-                    torch.cuda.empty_cache()
-                    gpu_memory_bytes = torch.cuda.get_device_properties(0).total_memory
-                    gpu_memory_gb = gpu_memory_bytes / (1024**3)
-                    gpu_name = torch.cuda.get_device_name(0)
-                    gpu_actually_accessible = True
-                    self.logger.info(f"GPU accessible: {gpu_name} with {gpu_memory_gb:.1f}GB")
-                except Exception as gpu_error:
-                    self.logger.warning(f"GPU detected but not accessible: {gpu_error}")
-                    gpu_available = False
-                    gpu_count = 0
-            
-            # Check for Zero GPU (Spaces environment)
-            zero_gpu_active = self.is_zero_gpu_available and (
-                os.getenv("SPACE_ID") is not None or 
-                os.getenv("SPACES_ZERO_GPU") == "true" or
-                gpu_actually_accessible
-            )
-            
-            resources = {
-                "cpu_count": cpu_count,
-                "cpu_frequency_mhz": cpu_freq.current if cpu_freq else 0,
-                "total_memory_gb": memory.total / (1024**3),
-                "available_memory_gb": memory.available / (1024**3),
-                "gpu_available": gpu_available,
-                "gpu_count": gpu_count,
-                "gpu_memory_gb": gpu_memory_gb,
-                "gpu_name": gpu_name,
-                "zero_gpu_available": zero_gpu_active,
-                "compute_capability": self._determine_compute_capability(gpu_memory_gb, cpu_count)
-            }
-            
-            self.resource_cache = resources
-            self.last_resource_check = current_time
-            
-            self.logger.info(f"Detected resources: {resources}")
-            return resources
-            
-        except Exception as e:
-            self.logger.error(f"Resource detection failed: {e}")
-            return {
-                "cpu_count": 2,
-                "gpu_available": False,
-                "gpu_memory_gb": 0,
-                "compute_capability": "basic"
-            }
-    
-    def _determine_compute_capability(self, gpu_memory_gb: float, cpu_count: int) -> str:
-        """Determine compute capability tier"""
-        if gpu_memory_gb >= 16:
-            return "premium"  # Can run large models
-        elif gpu_memory_gb >= 8:
-            return "high"     # Can run medium models
-        elif gpu_memory_gb >= 4:
-            return "medium"   # Can run small models
-        elif cpu_count >= 8:
-            return "cpu_optimized"  # CPU inference
-        else:
-            return "basic"    # Limited capability
-    
-    def zero_gpu_decorator(self, duration: int = 120):
-        """Decorator for Zero GPU allocation"""
-        if not self.is_zero_gpu_available:
-            # Fallback for non-Zero GPU environments
-            def decorator(func):
-                @wraps(func)
-                async def wrapper(*args, **kwargs):
-                    return await func(*args, **kwargs)
-                return wrapper
-            return decorator
-        
-        # Use actual Zero GPU decorator
-        def decorator(func):
-            @spaces.GPU(duration=duration)
-            @wraps(func)
-            async def wrapper(*args, **kwargs):
-                return await func(*args, **kwargs)
-            return wrapper
-        return decorator
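-    
-    # Illustrative usage (hypothetical handler, not part of this module):
-    #     optimizer = ZeroGPUOptimizer()
-    #
-    #     @optimizer.zero_gpu_decorator(duration=60)
-    #     async def run_inference(prompt: str) -> str:
-    #         ...  # GPU-bound work runs inside the allocated window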
-    
-    async def optimize_model_loading(self, model_config: Dict[str, Any]) -> Dict[str, Any]:
-        """Optimize model loading based on available resources"""
-        resources = await self.detect_available_resources()
-        
-        # Adjust configuration based on resources
-        optimized_config = model_config.copy()
-        
-        if resources["compute_capability"] == "basic":
-            optimized_config.update({
-                "model_name": "Qwen/Qwen2.5-0.5B-Instruct",
-                "torch_dtype": "float32",
-                "device_map": "cpu",
-                "use_quantization": True
-            })
-        elif resources["compute_capability"] == "cpu_optimized":
-            optimized_config.update({
-                "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
-                "torch_dtype": "float32",
-                "device_map": "cpu",
-                "use_quantization": True
-            })
-        elif resources["compute_capability"] == "medium":
-            optimized_config.update({
-                "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
-                "torch_dtype": "float16",
-                "device_map": "auto",
-                "use_quantization": True
-            })
-        elif resources["compute_capability"] == "high":
-            optimized_config.update({
-                "model_name": "Qwen/Qwen2.5-3B-Instruct",
-                "torch_dtype": "bfloat16",
-                "device_map": "auto",
-                "use_quantization": False
-            })
-        else:  # premium
-            optimized_config.update({
-                "model_name": "Qwen/Qwen2.5-7B-Instruct",
-                "torch_dtype": "bfloat16",
-                "device_map": "auto",
-                "use_quantization": False
-            })
-        
-        self.logger.info(f"Optimized model config: {optimized_config}")
-        return optimized_config
-    
-    def get_deployment_config(self) -> Dict[str, Any]:
-        """Get optimized deployment configuration"""
-        resources = asyncio.run(self.detect_available_resources())
-        
-        base_config = {
-            "max_threads": min(40, resources["cpu_count"] * 2),
-            "enable_queue": True,
-            "show_error": True,
-            "quiet": False
-        }
-        
-        # Adjust based on compute capability
-        if resources["compute_capability"] in ["basic", "cpu_optimized"]:
-            base_config.update({
-                "max_threads": resources["cpu_count"],
-                "concurrency_count": 1
-            })
-        elif resources["compute_capability"] == "medium":
-            base_config.update({
-                "concurrency_count": 2
-            })
-        else:
-            base_config.update({
-                "concurrency_count": 4
-            })
-        
-        return base_config
-    
-    def is_gpu_environment(self) -> bool:
-        """Check if we're in a GPU-enabled environment"""
-        if not self.resource_cache:
-            # Synchronously check resources
-            try:
-                return torch.cuda.is_available() and torch.cuda.device_count() > 0
-            except Exception:
-                return False
-        return self.resource_cache.get("gpu_available", False)
\ No newline at end of file
diff --git a/src/pipelines/opensource_3d_pipeline_v2.py b/src/pipelines/opensource_3d_pipeline_v2.py
deleted file mode 100644
index 435641791311087312afb446432704bd8ef1e9bd..0000000000000000000000000000000000000000
--- a/src/pipelines/opensource_3d_pipeline_v2.py
+++ /dev/null
@@ -1,1314 +0,0 @@
-"""
-Production-Ready Open-Source Text-to-Rigged-3D Pipeline
-Uses HuggingFace Spaces API for Flux, Hunyuan3D implementation, and UniRig models
-Rick Rubin philosophy: Strip complexity, amplify creativity
-"""
-
-import asyncio
-import json
-import logging
-import os
-import shutil
-import subprocess
-import time
-from dataclasses import dataclass
-from enum import Enum
-from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Any, Union
-import numpy as np
-import torch
-import trimesh
-from PIL import Image, ImageDraw
-import requests
-from gradio_client import Client
-import huggingface_hub
-from huggingface_hub import snapshot_download, hf_hub_download
-from transformers import AutoTokenizer, AutoModel
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-@dataclass
-class ProductionConfig:
-    """Production configuration for open-source pipeline"""
-    
-    # Text-to-image model options  
-    text_to_image_model: str = "omnigen2"  # omnigen2 is primary, flux as fallback
-    omnigen2_repo: str = "shitao/OmniGen-v1"  # Updated to working repo
-    flux_space: str = "black-forest-labs/FLUX.1-dev"  # Kept for fallback
-    
-    # 3D Generation models
-    hunyuan3d_model: str = "tencent/Hunyuan3D-2.1"  # Updated to latest version
-    hunyuan3d_space: str = "tencent/Hunyuan3D-2.1"  # Official Gradio Space
-    unirig_repo: str = "https://github.com/VAST-AI-Research/UniRig"
-    unirig_hf_model: str = "VAST-AI/UniRig"
-    
-    # Generation settings
-    image_resolution: int = 1024
-    num_views: int = 6  # Front, back, left, right, top, 3/4
-    guidance_scale: float = 3.5
-    inference_steps: int = 28
-    
-    # 3D settings
-    hunyuan3d_resolution: int = 1024
-    target_polycount: int = 30000
-    texture_resolution: int = 2048
-    
-    # Paths
-    output_dir: Path = Path("./digipal_3d_output")
-    cache_dir: Path = Path("./digipal_model_cache")
-    temp_dir: Path = Path("./digipal_temp")
-    
-    # Hardware
-    device: str = "cuda" if torch.cuda.is_available() else "cpu"
-    hf_token: Optional[str] = None  # For private spaces/models
-    
-    # OmniGen2 settings
-    text_guidance_scale: float = 3.0
-    image_guidance_scale: float = 1.6
-    max_pixels: int = 1048576  # 1024x1024 equivalent
-    enable_cpu_offload: bool = True  # For VRAM optimization
-
-class OmniGen2MultiViewGenerator:
-    """Generate multi-view images using OmniGen"""
-    
-    def __init__(self, config: ProductionConfig):
-        self.config = config
-        self.model = None
-        self.tokenizer = None
-        logger.info(f"Initializing OmniGen from: {config.omnigen2_repo}")
-        
-    def _load_model(self):
-        """Lazy load the OmniGen model to save memory"""
-        if self.model is None:
-            try:
-                # Import OmniGen specific modules
-                from diffusers import DiffusionPipeline
-                
-                # Load OmniGen model using diffusers pipeline
-                self.model = DiffusionPipeline.from_pretrained(
-                    self.config.omnigen2_repo,
-                    trust_remote_code=True,
-                    torch_dtype=torch.float16 if self.config.device == "cuda" else torch.float32,
-                    device_map="auto" if self.config.enable_cpu_offload else None
-                )
-                
-                if not self.config.enable_cpu_offload:
-                    self.model = self.model.to(self.config.device)
-                    
-                # Enable memory efficient attention if available
-                if hasattr(self.model, 'enable_attention_slicing'):
-                    self.model.enable_attention_slicing()
-                if hasattr(self.model, 'enable_vae_slicing'):
-                    self.model.enable_vae_slicing()
-                    
-                logger.info("OmniGen model loaded successfully")
-            except Exception as e:
-                logger.error(f"Failed to load OmniGen model: {str(e)}")
-                logger.info("Trying alternative loading method...")
-                try:
-                    # Fallback: try loading as a generic model
-                    self.model = AutoModel.from_pretrained(
-                        self.config.omnigen2_repo,
-                        trust_remote_code=True,
-                        torch_dtype=torch.float16 if self.config.device == "cuda" else torch.float32,
-                        device_map="auto" if self.config.enable_cpu_offload else None
-                    )
-                    if not self.config.enable_cpu_offload:
-                        self.model = self.model.to(self.config.device)
-                    logger.info("OmniGen model loaded via fallback method")
-                except Exception as fallback_e:
-                    logger.error(f"Fallback loading also failed: {fallback_e}")
-                    raise
-    
-    async def generate_creature_views(self, base_prompt: str, 
-                                    creature_name: str) -> Dict[str, Path]:
-        """Generate multiple consistent views of a creature using OmniGen2"""
-        
-        self._load_model()
-        
-        # View specifications for 3D reconstruction
-        view_specs = {
-            "front": {
-                "angle": "front view",
-                "pose": "T-pose, arms spread",
-                "details": "facing camera directly"
-            },
-            "back": {
-                "angle": "back view", 
-                "pose": "T-pose, arms spread",
-                "details": "showing back details"
-            },
-            "left": {
-                "angle": "left side profile",
-                "pose": "T-pose, arms spread",
-                "details": "perfect 90 degree side view"
-            },
-            "right": {
-                "angle": "right side profile",
-                "pose": "T-pose, arms spread", 
-                "details": "perfect 90 degree side view"
-            },
-            "front_angle": {
-                "angle": "3/4 front view",
-                "pose": "dynamic action pose",
-                "details": "3/4 angled perspective"
-            },
-            "top": {
-                "angle": "top-down view",
-                "pose": "T-pose, arms spread",
-                "details": "bird's eye view from above"
-            }
-        }
-        
-        output_paths = {}
-        
-        for view_name, spec in view_specs.items():
-            try:
-                # Construct detailed prompt for this view
-                full_prompt = (
-                    f"{base_prompt}, {spec['angle']}, {spec['pose']}, "
-                    f"{spec['details']}, professional 3D reference sheet, "
-                    f"clean white background, high quality, detailed, "
-                    f"consistent lighting, monster character design"
-                )
-                
-                # Generate image using OmniGen
-                with torch.no_grad():
-                    if hasattr(self.model, '__call__'):
-                        # Standard diffusers pipeline call
-                        result = self.model(
-                            prompt=full_prompt,
-                            width=self.config.image_resolution,
-                            height=self.config.image_resolution,
-                            guidance_scale=self.config.text_guidance_scale,
-                            num_inference_steps=self.config.inference_steps,
-                            generator=torch.Generator(device=self.config.device).manual_seed(42 + len(output_paths))
-                        )
-                        
-                        # Extract the image from the result
-                        if hasattr(result, 'images'):
-                            image = result.images[0]
-                        elif isinstance(result, list):
-                            image = result[0]
-                        else:
-                            image = result
-                            
-                    elif hasattr(self.model, 'generate'):
-                        # Alternative generation method for different model types
-                        result = self.model.generate(
-                            prompt=full_prompt,
-                            image_size=(self.config.image_resolution, self.config.image_resolution),
-                            guidance_scale=self.config.text_guidance_scale,
-                            num_inference_steps=self.config.inference_steps
-                        )
-                        
-                        if isinstance(result, torch.Tensor):
-                            # Convert tensor to PIL Image
-                            image_array = result.cpu().numpy().squeeze()
-                            if image_array.ndim == 3 and image_array.shape[0] == 3:
-                                image_array = np.transpose(image_array, (1, 2, 0))
-                            image_array = (image_array * 255).astype(np.uint8)
-                            image = Image.fromarray(image_array)
-                        else:
-                            image = result
-                    else:
-                        raise ValueError("Unknown model interface - cannot generate image")
-                
-                # Save generated image
-                output_path = self.config.output_dir / f"{creature_name}_{view_name}_view.png"
-                output_path.parent.mkdir(parents=True, exist_ok=True)
-                
-                # Ensure image is a PIL Image and save
-                if not isinstance(image, Image.Image):
-                    if isinstance(image, np.ndarray):
-                        image = Image.fromarray((image * 255).astype(np.uint8))
-                    else:
-                        logger.warning(f"Unexpected image type: {type(image)}")
-                        continue
-                
-                image.save(output_path)
-                output_paths[view_name] = output_path
-                
-                logger.info(f"Generated {view_name} view: {output_path}")
-                
-            except Exception as e:
-                logger.error(f"Failed to generate {view_name} view: {str(e)}")
-                continue
-        
-        return output_paths
-
-class FluxMultiViewGenerator:
-    """Generate multi-view images using Flux via Gradio Client"""
-    
-    def __init__(self, config: ProductionConfig):
-        self.config = config
-        self.client = None
-        if config.hf_token:
-            try:
-                self.client = Client(config.flux_space, hf_token=config.hf_token)
-                logger.info(f"Connected to Flux Space: {config.flux_space}")
-            except Exception as e:
-                logger.error(f"Failed to connect to Flux Space: {e}")
-                raise
-        else:
-            logger.warning("No HF token provided, Flux unavailable")
-    
-    async def generate_creature_views(self, base_prompt: str, 
-                                    creature_name: str) -> Dict[str, Path]:
-        """Generate multiple consistent views of a creature"""
-        
-        if not self.client:
-            raise RuntimeError("Flux client not initialized - missing HF token")
-        
-        # View specifications for 3D reconstruction
-        view_specs = {
-            "front": {
-                "angle": "front view",
-                "pose": "T-pose, arms spread",
-                "details": "facing camera directly"
-            },
-            "back": {
-                "angle": "back view", 
-                "pose": "T-pose, arms spread",
-                "details": "showing back details"
-            },
-            "left": {
-                "angle": "left side profile",
-                "pose": "T-pose, arms spread",
-                "details": "perfect 90 degree side view"
-            },
-            "right": {
-                "angle": "right side profile",
-                "pose": "T-pose, arms spread", 
-                "details": "perfect 90 degree side view"
-            },
-            "front_angle": {
-                "angle": "3/4 front view",
-                "pose": "dynamic action pose",
-                "details": "45 degree angle"
-            },
-            "top": {
-                "angle": "top-down bird's eye view",
-                "pose": "spread pose",
-                "details": "looking down from above"
-            }
-        }
-        
-        generated_views = {}
-        base_seed = int(time.time()) % 1000000  # Shared base seed keeps all views in a run related
-        
-        for view_name, spec in view_specs.items():
-            # Construct view-specific prompt
-            view_prompt = self._construct_view_prompt(base_prompt, spec)
-            
-            # Generate image via Flux
-            logger.info(f"Generating {view_name} view...")
-            image_path = await self._generate_single_view(
-                view_prompt, 
-                f"{creature_name}_{view_name}",
-                seed=base_seed + hash(view_name) % 1000  # Slight variation per view
-            )
-            
-            generated_views[view_name] = image_path
-        
-        # Generate concept sheet
-        concept_sheet = await self._create_concept_sheet(generated_views, creature_name)
-        generated_views["concept_sheet"] = concept_sheet
-        
-        return generated_views
-    
-    def _construct_view_prompt(self, base_prompt: str, spec: Dict[str, str]) -> str:
-        """Construct detailed prompt for specific view"""
-        
-        # Core consistency elements
-        consistency_prompts = [
-            "consistent character design",
-            "same colors and patterns",
-            "identical features",
-            "matching proportions"
-        ]
-        
-        # Technical requirements for 3D
-        technical_prompts = [
-            "clean white background",
-            "studio lighting",
-            "no shadows",
-            "high contrast",
-            "clear silhouette",
-            "orthographic projection"
-        ]
-        
-        # Combine all elements
-        full_prompt = (
-            f"{base_prompt}, "
-            f"{spec['angle']}, {spec['pose']}, {spec['details']}, "
-            f"{', '.join(consistency_prompts)}, "
-            f"{', '.join(technical_prompts)}, "
-            "professional 3D character reference, game asset quality"
-        )
-        
-        return full_prompt
-    
-    async def _generate_single_view(self, prompt: str, filename: str, 
-                                  seed: Optional[int] = None) -> Path:
-        """Generate single image using Flux API"""
-        
-        try:
-            # Call Flux via Gradio Client
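-            # (keyword names below assume the public FLUX.1-dev Space's
-            # /infer signature; other Spaces may differ)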
-            result = self.client.predict(
-                prompt=prompt,
-                seed=seed or 0,
-                randomize_seed=seed is None,
-                width=self.config.image_resolution,
-                height=self.config.image_resolution,
-                guidance_scale=self.config.guidance_scale,
-                num_inference_steps=self.config.inference_steps,
-                api_name="/infer"
-            )
-            
-            # Extract image path from result
-            image_info, used_seed = result
-            
-            # Download/copy image to our directory (ensure the temp dir exists)
-            output_path = self.config.temp_dir / f"{filename}.png"
-            output_path.parent.mkdir(parents=True, exist_ok=True)
-            
-            source_path = image_info.get('path')
-            if source_path and os.path.exists(source_path):
-                shutil.copy(source_path, output_path)
-                return output_path
-            elif 'url' in image_info:
-                # Download from URL
-                response = requests.get(image_info['url'], timeout=60)
-                response.raise_for_status()
-                with open(output_path, 'wb') as f:
-                    f.write(response.content)
-                return output_path
-            else:
-                raise ValueError("No valid image path or URL in result")
-                
-        except Exception as e:
-            logger.error(f"Flux generation failed: {e}")
-            # Create fallback placeholder
-            return self._create_placeholder_image(filename)
-    
-    def _create_placeholder_image(self, filename: str) -> Path:
-        """Create placeholder image if generation fails"""
-        img = Image.new('RGB', (self.config.image_resolution, self.config.image_resolution), 
-                       color=(200, 200, 200))
-        output_path = self.config.temp_dir / f"{filename}_placeholder.png"
-        output_path.parent.mkdir(parents=True, exist_ok=True)
-        img.save(output_path)
-        return output_path
-    
-    async def _create_concept_sheet(self, views: Dict[str, Path], 
-                                  creature_name: str) -> Path:
-        """Create professional concept sheet from views"""
-        
-        # Layout: 3x2 grid
-        cols, rows = 3, 2
-        cell_size = self.config.image_resolution
-        margin = 50
-        
-        # Calculate sheet dimensions
-        sheet_width = cols * cell_size + (cols + 1) * margin
-        sheet_height = rows * cell_size + (rows + 1) * margin
-        
-        # Create sheet with light gray background
-        sheet = Image.new('RGB', (sheet_width, sheet_height), color=(240, 240, 240))
-        
-        # View positions in grid
-        positions = {
-            "front": (0, 0),
-            "back": (1, 0),
-            "left": (2, 0),
-            "right": (0, 1),
-            "front_angle": (1, 1),
-            "top": (2, 1)
-        }
-        
-        # Place views
-        for view_name, (col, row) in positions.items():
-            if view_name in views and views[view_name].exists():
-                img = Image.open(views[view_name])
-                img = img.resize((cell_size, cell_size), Image.Resampling.LANCZOS)
-                
-                x = margin + col * (cell_size + margin)
-                y = margin + row * (cell_size + margin)
-                
-                sheet.paste(img, (x, y))
-                
-                # Label the view beneath its cell using PIL's default font
-                draw = ImageDraw.Draw(sheet)
-                draw.text((x, y + cell_size + 10),
-                          view_name.replace("_", " ").title(), fill=(60, 60, 60))
-        
-        # Save concept sheet
-        output_path = self.config.output_dir / f"{creature_name}_concept_sheet.png"
-        sheet.save(output_path)  # PNG ignores JPEG-style "quality" settings
-        
-        return output_path
-
-class Hunyuan3DProcessor:
-    """Hunyuan3D-2.1 implementation using official Gradio Space API"""
-    
-    def __init__(self, config: ProductionConfig):
-        self.config = config
-        self.client = None
-        logger.info(f"Initializing Hunyuan3D-2.1 from space: {config.hunyuan3d_space}")
-        
-    def _initialize_client(self):
-        """Initialize Gradio client for Hunyuan3D Space"""
-        if self.client is None:
-            try:
-                from gradio_client import Client
-                
-                # Connect to the official Hunyuan3D Space
-                self.client = Client(
-                    src=self.config.hunyuan3d_space,
-                    hf_token=self.config.hf_token
-                )
-                logger.info(f"Connected to Hunyuan3D-2.1 Space: {self.config.hunyuan3d_space}")
-                
-            except Exception as e:
-                logger.error(f"Failed to connect to Hunyuan3D Space: {e}")
-                logger.info("Will try local fallback if available")
-                # Don't raise here, let the generation method handle fallback
-                self.client = None
-    
-    async def generate_3d_from_views(self, view_paths: Dict[str, Path], 
-                                   creature_name: str) -> Dict[str, Any]:
-        """Generate 3D model from image using Hunyuan3D-2.1 Space API"""
-        
-        self._initialize_client()
-        
-        logger.info(f"Generating 3D model from {len(view_paths)} views using Hunyuan3D-2.1")
-        start_time = time.time()
-        
-        try:
-            # Use the front view as primary input for Hunyuan3D
-            primary_view = None
-            if "front" in view_paths:
-                primary_view = view_paths["front"]
-            elif view_paths:
-                primary_view = next(iter(view_paths.values()))
-            
-            if not primary_view:
-                raise ValueError("No input images provided")
-            
-            if not primary_view.exists():
-                raise ValueError(f"Input image not found: {primary_view}")
-            
-            # Try using the official Hunyuan3D Space API
-            if self.client:
-                try:
-                    logger.info("Using official Hunyuan3D-2.1 Space API...")
-                    
-                    # Call the Hunyuan3D Space API
-                    # Based on the official interface, it typically takes an image input
-                    result = self.client.predict(
-                        image=str(primary_view),  # Input image path
-                        api_name="/generate_3d"  # API endpoint name (may vary)
-                    )
-                    
-                    # Handle the result - typically returns file paths or URLs
-                    if isinstance(result, (list, tuple)) and len(result) > 0:
-                        # Extract the 3D model file
-                        model_result = result[0] if isinstance(result[0], str) else result
-                        
-                        # Download or copy the result to our output directory
-                        mesh_path = self.config.output_dir / f"{creature_name}_hunyuan3d.glb"
-                        mesh_path.parent.mkdir(parents=True, exist_ok=True)
-                        
-                        if isinstance(model_result, str) and os.path.exists(model_result):
-                            # Copy from local path
-                            shutil.copy(model_result, mesh_path)
-                        elif isinstance(model_result, str) and model_result.startswith('http'):
-                            # Download from URL (bounded wait, fail loudly on HTTP errors)
-                            response = requests.get(model_result, timeout=120)
-                            response.raise_for_status()
-                            with open(mesh_path, 'wb') as f:
-                                f.write(response.content)
-                        else:
-                            raise ValueError(f"Unexpected result format: {type(model_result)}")
-                        
-                        generation_time = time.time() - start_time
-                        
-                        # Get basic file statistics
-                        file_size = mesh_path.stat().st_size if mesh_path.exists() else 0
-                        
-                        return {
-                            "success": True,
-                            "mesh_path": mesh_path,
-                            "texture_path": mesh_path,  # Same file for GLB
-                            "statistics": {
-                                "file_size_mb": file_size / (1024 * 1024),
-                                "generation_time": generation_time,
-                                "model": "Hunyuan3D-2.1",
-                                "input_views": len(view_paths),
-                                "method": "official_space_api"
-                            }
-                        }
-                        
-                    else:
-                        raise ValueError("Invalid result from Hunyuan3D Space API")
-                        
-                except Exception as api_error:
-                    logger.error(f"Hunyuan3D Space API failed: {api_error}")
-                    logger.info("Falling back to alternative method...")
-                    # Fall through to local fallback
-            
-            # Fallback: Use local processing or placeholder
-            logger.info("Using local fallback for 3D generation...")
-            return await self._local_3d_fallback(primary_view, creature_name, start_time)
-            
-        except Exception as e:
-            logger.error(f"Hunyuan3D generation failed: {e}")
-            return {
-                "success": False,
-                "error": str(e)
-            }
-    
-    async def _local_3d_fallback(self, image_path: Path, creature_name: str, 
-                               start_time: float) -> Dict[str, Any]:
-        """Fallback method for 3D generation when Space API is unavailable"""
-        
-        logger.info("Generating placeholder 3D model...")
-        
-        # Create a simple cube mesh as placeholder
-        import trimesh
-        
-        # Generate a basic cube mesh
-        mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
-        
-        # Apply basic coloring based on input image
-        try:
-            input_image = Image.open(image_path).convert("RGB")
-            # Get dominant color from image
-            avg_color = np.array(input_image).mean(axis=(0, 1)) / 255.0
-            
-            # Apply color to mesh
-            if hasattr(mesh.visual, 'vertex_colors'):
-                # Scale to 0-255 before casting; casting the fractional RGBA
-                # values to uint8 first would truncate them all to 0 or 1
-                mesh.visual.vertex_colors = (np.tile(
-                    [*avg_color, 1.0], (len(mesh.vertices), 1)
-                ) * 255).astype(np.uint8)
-                
-        except Exception as color_error:
-            logger.warning(f"Failed to apply coloring: {color_error}")
-        
-        # Save the mesh
-        mesh_path = self.config.output_dir / f"{creature_name}_fallback_3d.glb"
-        mesh_path.parent.mkdir(parents=True, exist_ok=True)
-        mesh.export(str(mesh_path))
-        
-        generation_time = time.time() - start_time
-        
-        return {
-            "success": True,
-            "mesh_path": mesh_path,
-            "texture_path": mesh_path,
-            "statistics": {
-                "vertices": len(mesh.vertices),
-                "faces": len(mesh.faces),
-                "generation_time": generation_time,
-                "model": "fallback_cube",
-                "input_views": 1,
-                "method": "local_fallback"
-            }
-        }
-
-class UniRigProcessor:
-    """UniRig integration using HuggingFace models and inference API"""
-    
-    def __init__(self, config: ProductionConfig):
-        self.config = config
-        self.model_path = None
-        self.client = None
-        logger.info(f"Initializing UniRig from HuggingFace: {config.unirig_hf_model}")
-        
-    def _setup_unirig(self):
-        """Setup UniRig using HuggingFace models and API"""
-        
-        try:
-            # Try to use HuggingFace Inference API first
-            from gradio_client import Client
-            
-            # Check if there's a UniRig Space available
-            try:
-                # This would be the ideal approach if there's a UniRig Space
-                self.client = Client(
-                    src=self.config.unirig_hf_model,  # or a specific space
-                    hf_token=self.config.hf_token
-                )
-                logger.info("Connected to UniRig via HuggingFace Space/API")
-                return
-            except Exception:
-                logger.info("No UniRig Space found, trying direct model download...")
-            
-            # Fallback: Download models from HuggingFace
-            try:
-                self.model_path = snapshot_download(
-                    repo_id=self.config.unirig_hf_model,
-                    cache_dir=self.config.cache_dir,
-                    token=self.config.hf_token,
-                    allow_patterns=["*.py", "*.yaml", "*.json", "*.bin", "*.safetensors"]
-                )
-                logger.info(f"UniRig models downloaded to: {self.model_path}")
-                
-            except Exception as download_error:
-                logger.warning(f"Could not download UniRig from HF: {download_error}")
-                logger.info("UniRig will use procedural fallback method")
-                self.model_path = None
-                
-        except Exception as e:
-            logger.error(f"Failed to setup UniRig: {e}")
-            logger.info("UniRig will use procedural fallback method")
-            self.model_path = None
-    
-    async def auto_rig_creature(self, mesh_path: Path, creature_name: str,
-                              creature_type: str = "biped") -> Dict[str, Any]:
-        """Apply automatic rigging using UniRig via HuggingFace"""
-        
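-        # Resolution order: hosted Space API -> downloaded HF models ->
-        # procedural skeleton fallback; each failure falls through to the next.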
-        logger.info(f"Auto-rigging {creature_name} as {creature_type} using UniRig")
-        
-        # Setup UniRig if not already done
-        if self.model_path is None and self.client is None:
-            self._setup_unirig()
-        
-        try:
-            # Try using HuggingFace Space/API first
-            if self.client:
-                return await self._rig_via_hf_api(mesh_path, creature_name, creature_type)
-            
-            # Try using downloaded models
-            elif self.model_path:
-                return await self._rig_via_local_models(mesh_path, creature_name, creature_type)
-            
-            # Fallback to procedural rigging
-            else:
-                logger.info("No UniRig models available, using procedural fallback")
-                return await self._procedural_rigging_fallback(mesh_path, creature_name, creature_type)
-            
-        except Exception as e:
-            logger.error(f"UniRig failed: {e}")
-            # Fallback to procedural rigging
-            return await self._procedural_rigging_fallback(mesh_path, creature_name, creature_type)
-    
-    async def _rig_via_hf_api(self, mesh_path: Path, creature_name: str, 
-                            creature_type: str) -> Dict[str, Any]:
-        """Rig using HuggingFace Space API"""
-        
-        logger.info("Using UniRig HuggingFace Space API...")
-        
-        try:
-            # Call the UniRig Space API
-            result = self.client.predict(
-                mesh_file=str(mesh_path),
-                creature_type=creature_type,
-                api_name="/auto_rig"  # This would be the API endpoint
-            )
-            
-            # Handle the result
-            if isinstance(result, (list, tuple)) and len(result) > 0:
-                rigged_file = result[0]
-                
-                # Copy result to our output directory
-                output_dir = self.config.output_dir / "rigged"
-                output_dir.mkdir(parents=True, exist_ok=True)
-                rigged_path = output_dir / f"{creature_name}_rigged.glb"
-                
-                if isinstance(rigged_file, str) and os.path.exists(rigged_file):
-                    shutil.copy(rigged_file, rigged_path)
-                elif isinstance(rigged_file, str) and rigged_file.startswith('http'):
-                    # Download from URL (bounded wait, fail loudly on HTTP errors)
-                    response = requests.get(rigged_file, timeout=120)
-                    response.raise_for_status()
-                    with open(rigged_path, 'wb') as f:
-                        f.write(response.content)
-                else:
-                    raise ValueError(f"Unexpected result format: {type(rigged_file)}")
-                
-                return {
-                    "success": True,
-                    "rigged_path": rigged_path,
-                    "bone_count": "unknown",  # API doesn't provide this
-                    "method": "hf_space_api"
-                }
-            else:
-                raise ValueError("Invalid result from UniRig Space API")
-                
-        except Exception as api_error:
-            logger.error(f"UniRig Space API failed: {api_error}")
-            raise
-    
-    async def _rig_via_local_models(self, mesh_path: Path, creature_name: str,
-                                  creature_type: str) -> Dict[str, Any]:
-        """Rig using locally downloaded UniRig models"""
-        
-        logger.info("Using local UniRig models...")
-        
-        try:
-            # This would require implementing the UniRig inference pipeline
-            # For now, fall back to procedural method
-            logger.info("Local UniRig inference not yet implemented, using procedural fallback")
-            return await self._procedural_rigging_fallback(mesh_path, creature_name, creature_type)
-            
-        except Exception as e:
-            logger.error(f"Local UniRig inference failed: {e}")
-            raise
-    
-    async def _run_unirig_pipeline(self, mesh_path: Path, 
-                                 skeleton_config: str,
-                                 creature_name: str) -> Dict[str, Any]:
-        """Execute UniRig pipeline"""
-        
-        # Output paths
-        output_dir = self.config.output_dir / "rigged"
-        output_dir.mkdir(parents=True, exist_ok=True)
-        
-        skeleton_path = output_dir / f"{creature_name}_skeleton.json"
-        weights_path = output_dir / f"{creature_name}_weights.npz"
-        rigged_path = output_dir / f"{creature_name}_rigged.glb"
-        
-        # Step 1: Generate skeleton
-        logger.info("Generating skeleton...")
-        skeleton_data = self._generate_skeleton(mesh_path, skeleton_config)
-        
-        with open(skeleton_path, 'w') as f:
-            json.dump(skeleton_data, f, indent=2)
-        
-        # Step 2: Calculate skinning weights
-        logger.info("Calculating skinning weights...")
-        weights = self._calculate_skinning_weights(mesh_path, skeleton_data)
-        
-        np.savez_compressed(weights_path, weights=weights)
-        
-        # Step 3: Apply rigging to mesh
-        logger.info("Applying rigging to mesh...")
-        rigged_mesh = self._apply_rigging(mesh_path, skeleton_data, weights)
-        
-        # Export rigged model
-        rigged_mesh.export(rigged_path)
-        
-        # Step 4: Validate rigging
-        validation = self._validate_rigging(rigged_mesh)
-        
-        return {
-            "success": True,
-            "rigged_path": rigged_path,
-            "skeleton_path": skeleton_path,
-            "weights_path": weights_path,
-            "bone_count": len(skeleton_data["bones"]),
-            "validation": validation
-        }
-    
-    def _generate_skeleton(self, mesh_path: Path, config: str) -> Dict[str, Any]:
-        """Generate skeleton structure"""
-        
-        # Load mesh for analysis
-        mesh = trimesh.load(mesh_path)
-        if isinstance(mesh, trimesh.Scene):
-            mesh = mesh.dump(concatenate=True)
-        
-        # Analyze mesh geometry
-        bounds = mesh.bounds
-        center = mesh.centroid
-        height = bounds[1][2] - bounds[0][2]
-        width = bounds[1][0] - bounds[0][0]
-        depth = bounds[1][1] - bounds[0][1]
-        
-        # Generate skeleton based on config
-        if "biped" in config:
-            skeleton = self._generate_biped_skeleton(bounds, center, height, width)
-        elif "quadruped" in config:
-            skeleton = self._generate_quadruped_skeleton(bounds, center, height, width, depth)
-        else:
-            skeleton = self._generate_generic_skeleton(bounds, center, height)
-        
-        return skeleton
-    
-    def _generate_biped_skeleton(self, bounds: np.ndarray, center: np.ndarray,
-                               height: float, width: float) -> Dict[str, Any]:
-        """Generate biped skeleton structure"""
-        
-        bones = {
-            "root": {"pos": [center[0], center[1], bounds[0][2]], "parent": None},
-            "spine": {"pos": [center[0], center[1], bounds[0][2] + height * 0.3], "parent": "root"},
-            "spine1": {"pos": [center[0], center[1], bounds[0][2] + height * 0.5], "parent": "spine"},
-            "spine2": {"pos": [center[0], center[1], bounds[0][2] + height * 0.7], "parent": "spine1"},
-            "neck": {"pos": [center[0], center[1], bounds[0][2] + height * 0.85], "parent": "spine2"},
-            "head": {"pos": [center[0], center[1], bounds[0][2] + height * 0.95], "parent": "neck"},
-            
-            # Arms
-            "shoulder_l": {"pos": [center[0] - width * 0.35, center[1], bounds[0][2] + height * 0.7], "parent": "spine2"},
-            "arm_l": {"pos": [center[0] - width * 0.5, center[1], bounds[0][2] + height * 0.6], "parent": "shoulder_l"},
-            "forearm_l": {"pos": [center[0] - width * 0.6, center[1], bounds[0][2] + height * 0.4], "parent": "arm_l"},
-            "hand_l": {"pos": [center[0] - width * 0.65, center[1], bounds[0][2] + height * 0.3], "parent": "forearm_l"},
-            
-            "shoulder_r": {"pos": [center[0] + width * 0.35, center[1], bounds[0][2] + height * 0.7], "parent": "spine2"},
-            "arm_r": {"pos": [center[0] + width * 0.5, center[1], bounds[0][2] + height * 0.6], "parent": "shoulder_r"},
-            "forearm_r": {"pos": [center[0] + width * 0.6, center[1], bounds[0][2] + height * 0.4], "parent": "arm_r"},
-            "hand_r": {"pos": [center[0] + width * 0.65, center[1], bounds[0][2] + height * 0.3], "parent": "forearm_r"},
-            
-            # Legs
-            "thigh_l": {"pos": [center[0] - width * 0.15, center[1], bounds[0][2] + height * 0.25], "parent": "root"},
-            "shin_l": {"pos": [center[0] - width * 0.15, center[1], bounds[0][2] + height * 0.12], "parent": "thigh_l"},
-            "foot_l": {"pos": [center[0] - width * 0.15, center[1], bounds[0][2]], "parent": "shin_l"},
-            
-            "thigh_r": {"pos": [center[0] + width * 0.15, center[1], bounds[0][2] + height * 0.25], "parent": "root"},
-            "shin_r": {"pos": [center[0] + width * 0.15, center[1], bounds[0][2] + height * 0.12], "parent": "thigh_r"},
-            "foot_r": {"pos": [center[0] + width * 0.15, center[1], bounds[0][2]], "parent": "shin_r"}
-        }
-        
-        return {
-            "bones": bones,
-            "type": "biped",
-            "version": "1.0"
-        }
-    
-    def _generate_quadruped_skeleton(self, bounds: np.ndarray, center: np.ndarray,
-                                   height: float, width: float, depth: float) -> Dict[str, Any]:
-        """Generate quadruped skeleton structure"""
-        
-        bones = {
-            "root": {"pos": [center[0], center[1], center[2]], "parent": None},
-            "spine": {"pos": [center[0], center[1] - depth * 0.2, center[2]], "parent": "root"},
-            "spine1": {"pos": [center[0], center[1], center[2]], "parent": "spine"},
-            "spine2": {"pos": [center[0], center[1] + depth * 0.2, center[2]], "parent": "spine1"},
-            "neck": {"pos": [center[0], center[1] + depth * 0.35, center[2] + height * 0.1], "parent": "spine2"},
-            "head": {"pos": [center[0], center[1] + depth * 0.45, center[2] + height * 0.2], "parent": "neck"},
-            
-            # Front legs
-            "shoulder_l": {"pos": [center[0] - width * 0.25, center[1] + depth * 0.15, center[2]], "parent": "spine2"},
-            "arm_l": {"pos": [center[0] - width * 0.25, center[1] + depth * 0.15, center[2] - height * 0.3], "parent": "shoulder_l"},
-            "forearm_l": {"pos": [center[0] - width * 0.25, center[1] + depth * 0.15, bounds[0][2]], "parent": "arm_l"},
-            
-            "shoulder_r": {"pos": [center[0] + width * 0.25, center[1] + depth * 0.15, center[2]], "parent": "spine2"},
-            "arm_r": {"pos": [center[0] + width * 0.25, center[1] + depth * 0.15, center[2] - height * 0.3], "parent": "shoulder_r"},
-            "forearm_r": {"pos": [center[0] + width * 0.25, center[1] + depth * 0.15, bounds[0][2]], "parent": "arm_r"},
-            
-            # Back legs
-            "hip_l": {"pos": [center[0] - width * 0.25, center[1] - depth * 0.15, center[2]], "parent": "spine"},
-            "thigh_l": {"pos": [center[0] - width * 0.25, center[1] - depth * 0.15, center[2] - height * 0.3], "parent": "hip_l"},
-            "shin_l": {"pos": [center[0] - width * 0.25, center[1] - depth * 0.15, bounds[0][2]], "parent": "thigh_l"},
-            
-            "hip_r": {"pos": [center[0] + width * 0.25, center[1] - depth * 0.15, center[2]], "parent": "spine"},
-            "thigh_r": {"pos": [center[0] + width * 0.25, center[1] - depth * 0.15, center[2] - height * 0.3], "parent": "hip_r"},
-            "shin_r": {"pos": [center[0] + width * 0.25, center[1] - depth * 0.15, bounds[0][2]], "parent": "thigh_r"},
-            
-            # Tail
-            "tail": {"pos": [center[0], center[1] - depth * 0.4, center[2]], "parent": "spine"},
-            "tail1": {"pos": [center[0], center[1] - depth * 0.6, center[2] - height * 0.1], "parent": "tail"},
-            "tail2": {"pos": [center[0], center[1] - depth * 0.8, center[2] - height * 0.2], "parent": "tail1"}
-        }
-        
-        return {
-            "bones": bones,
-            "type": "quadruped",
-            "version": "1.0"
-        }
-    
-    def _generate_generic_skeleton(self, bounds: np.ndarray, center: np.ndarray,
-                                 height: float) -> Dict[str, Any]:
-        """Generate generic skeleton for unknown creature types"""
-        
-        bones = {
-            "root": {"pos": [center[0], center[1], bounds[0][2]], "parent": None},
-            "spine": {"pos": center.tolist(), "parent": "root"},
-            "top": {"pos": [center[0], center[1], bounds[1][2]], "parent": "spine"}
-        }
-        
-        return {
-            "bones": bones,
-            "type": "generic",
-            "version": "1.0"
-        }
-    
-    def _calculate_skinning_weights(self, mesh_path: Path, 
-                                  skeleton: Dict[str, Any]) -> np.ndarray:
-        """Calculate vertex skinning weights"""
-        
-        # Load mesh
-        mesh = trimesh.load(mesh_path)
-        if isinstance(mesh, trimesh.Scene):
-            mesh = mesh.dump(concatenate=True)
-        
-        vertices = mesh.vertices
-        bones = skeleton["bones"]
-        
-        # Initialize weight matrix
-        num_vertices = len(vertices)
-        num_bones = len(bones)
-        weights = np.zeros((num_vertices, num_bones))
-        
-        # Approximate bone influence with a Gaussian distance kernel
-        # (a cheap stand-in for true heat-diffusion skinning weights)
-        bone_positions = []
-        for bone_name, bone_data in bones.items():
-            bone_positions.append(bone_data["pos"])
-        bone_positions = np.array(bone_positions)
-        
-        # For each vertex, calculate influence from each bone
-        for i, vertex in enumerate(vertices):
-            distances = np.linalg.norm(bone_positions - vertex, axis=1)
-            
-            # Use Gaussian kernel for smooth falloff
-            sigma = max(np.median(distances) * 0.5, 1e-8)  # guard against zero sigma
-            vertex_weights = np.exp(-distances**2 / (2 * sigma**2))
-            
-            # Normalize to sum to 1
-            vertex_weights /= vertex_weights.sum()
-            
-            weights[i] = vertex_weights
-        
-        # Ensure each vertex is influenced by at most 4 bones (standard in games)
-        for i in range(num_vertices):
-            # Get top 4 weights
-            top_indices = np.argsort(weights[i])[-4:]
-            mask = np.zeros(num_bones, dtype=bool)
-            mask[top_indices] = True
-            
-            # Zero out non-top weights
-            weights[i, ~mask] = 0
-            
-            # Renormalize
-            if weights[i].sum() > 0:
-                weights[i] /= weights[i].sum()
-        
-        return weights
-    
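-    # Toy illustration of the Gaussian falloff above (numbers assumed): with
-    # bone distances d = [1, 2, 4] the median is 2, so sigma = 1 and the raw
-    # weights are exp(-0.5) ~= 0.607, exp(-2) ~= 0.135, exp(-8) ~= 0.0003,
-    # which normalize to roughly 0.82, 0.18 and 0.0005, so nearby bones
-    # dominate smoothly rather than through a hard cutoff.
-    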
-    def _apply_rigging(self, mesh_path: Path, skeleton: Dict[str, Any],
-                     weights: np.ndarray) -> trimesh.Trimesh:
-        """Apply rigging to mesh"""
-        
-        # Load mesh
-        mesh = trimesh.load(mesh_path)
-        if isinstance(mesh, trimesh.Scene):
-            mesh = mesh.dump(concatenate=True)
-        
-        # Store rigging data in mesh metadata
-        mesh.metadata['skeleton'] = skeleton
-        mesh.metadata['weights'] = weights.tolist()
-        mesh.metadata['rigged'] = True
-        
-        return mesh
-    
-    def _validate_rigging(self, mesh: trimesh.Trimesh) -> Dict[str, Any]:
-        """Validate rigging quality"""
-        
-        validation = {
-            "has_skeleton": "skeleton" in mesh.metadata,
-            "has_weights": "weights" in mesh.metadata,
-            "bone_count": 0,
-            "orphaned_vertices": 0,
-            "max_influences_per_vertex": 0
-        }
-        
-        if "skeleton" in mesh.metadata:
-            validation["bone_count"] = len(mesh.metadata["skeleton"]["bones"])
-        
-        if "weights" in mesh.metadata:
-            weights = np.array(mesh.metadata["weights"])
-            
-            # Check for orphaned vertices (no bone influence)
-            orphaned = np.sum(weights.sum(axis=1) == 0)
-            validation["orphaned_vertices"] = int(orphaned)
-            
-            # Check max influences per vertex
-            influences_per_vertex = np.sum(weights > 0, axis=1)
-            validation["max_influences_per_vertex"] = int(np.max(influences_per_vertex))
-        
-        return validation
-    
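-    # Illustrative shape of the report returned above (values are made up
-    # for a rigged biped; the exact bone count depends on the skeleton):
-    #
-    #     {"has_skeleton": True, "has_weights": True, "bone_count": 20,
-    #      "orphaned_vertices": 0, "max_influences_per_vertex": 4}
-    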
-    async def _procedural_rigging_fallback(self, mesh_path: Path, 
-                                         creature_name: str,
-                                         creature_type: str) -> Dict[str, Any]:
-        """Fallback procedural rigging"""
-        
-        logger.info("Using procedural rigging fallback...")
-        
-        # Generate skeleton
-        skeleton = self._generate_skeleton(mesh_path, f"configs/skeleton_{creature_type}.json")
-        
-        # Calculate weights
-        weights = self._calculate_skinning_weights(mesh_path, skeleton)
-        
-        # Apply rigging
-        rigged_mesh = self._apply_rigging(mesh_path, skeleton, weights)
-        
-        # Save
-        rigged_path = self.config.output_dir / f"{creature_name}_rigged_fallback.glb"
-        rigged_mesh.export(rigged_path)
-        
-        return {
-            "success": True,
-            "rigged_path": rigged_path,
-            "bone_count": len(skeleton["bones"]),
-            "method": "procedural_fallback"
-        }
-
-class SimplifiedHunyuan3D:
-    """Simplified 3D generation when Hunyuan3D is not available"""
-    
-    def __init__(self, config: ProductionConfig):
-        self.config = config
-    
-    def generate_mesh(self, views: Dict[str, np.ndarray]) -> Dict[str, Any]:
-        """Generate simple mesh from views"""
-        
-        # This is a placeholder - in production, implement proper 3D reconstruction
-        # Could use photogrammetry libraries or other image-to-3D methods
-        
-        # For now, generate a simple procedural mesh based on front view
-        front_view = views.get("front", list(views.values())[0])
-        
-        # Extract silhouette (assumes the subject on a near-white background)
-        view = np.asarray(front_view, dtype=np.float64)
-        if view.max() > 1.0:  # handle uint8 [0, 255] inputs as well as floats
-            view /= 255.0
-        gray = np.mean(view, axis=2)
-        mask = gray < 0.9
-        
-        # Find bounding box of the silhouette
-        coords = np.column_stack(np.where(mask))
-        if coords.size == 0:
-            raise ValueError("No silhouette detected in the front view")
-        y_min, x_min = coords.min(axis=0)
-        y_max, x_max = coords.max(axis=0)
-        
-        height = y_max - y_min
-        width = x_max - x_min
-        
-        # Generate simple mesh based on silhouette
-        # This is highly simplified - real implementation would use proper reconstruction
-        vertices = []
-        faces = []
-        
-        # Create a simple extruded shape
-        resolution = 32
-        for i in range(resolution):
-            for j in range(resolution):
-                y = i / resolution * height
-                x = j / resolution * width
-                
-                # Check if inside silhouette
-                img_y = int(y_min + y)
-                img_x = int(x_min + x)
-                
-                if 0 <= img_y < mask.shape[0] and 0 <= img_x < mask.shape[1]:
-                    if mask[img_y, img_x]:
-                        # Add front and back vertices
-                        z_front = 0.1
-                        z_back = -0.1
-                        
-                        vertices.append([x / width - 0.5, -y / height + 0.5, z_front])
-                        vertices.append([x / width - 0.5, -y / height + 0.5, z_back])
-        
-        vertices = np.array(vertices)
-        
-        # Generate faces (simplified)
-        # In production, use proper triangulation
-        num_verts = len(vertices) // 2
-        for i in range(num_verts - 1):
-            # Skip the last grid column and any index whose right/lower
-            # neighbours fall past the end of the vertex array (the silhouette
-            # grid is sparse, so the (i + resolution) neighbour may not exist)
-            if i % resolution != resolution - 1 and (i + resolution + 1) < num_verts:
-                # Front face
-                faces.append([i*2, (i+1)*2, (i+resolution)*2])
-                faces.append([(i+1)*2, (i+resolution+1)*2, (i+resolution)*2])
-                
-                # Back face
-                faces.append([i*2+1, (i+resolution)*2+1, (i+1)*2+1])
-                faces.append([(i+1)*2+1, (i+resolution)*2+1, (i+resolution+1)*2+1])
-        
-        faces = np.array(faces)
-        
-        return {
-            "vertices": vertices,
-            "faces": faces
-        }
-
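-# Hypothetical usage of the fallback generator above (names assumed for
-# illustration): the returned dict can be wrapped in a trimesh object for
-# export, e.g.
-#
-#     simple = SimplifiedHunyuan3D(ProductionConfig())
-#     data = simple.generate_mesh({"front": front_image})
-#     mesh = trimesh.Trimesh(vertices=data["vertices"], faces=data["faces"])
-#     mesh.export("fallback_mesh.glb")
-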
-class ProductionPipeline:
-    """
-    Complete production-ready open-source pipeline
-    OmniGen2/Flux -> Hunyuan3D -> UniRig
-    """
-    
-    def __init__(self, config: Optional[ProductionConfig] = None):
-        self.config = config or ProductionConfig()
-        
-        # Create directories
-        self.config.output_dir.mkdir(parents=True, exist_ok=True)
-        self.config.cache_dir.mkdir(parents=True, exist_ok=True)
-        self.config.temp_dir.mkdir(parents=True, exist_ok=True)
-        
-        # Initialize components
-        logger.info("Initializing production pipeline components...")
-        
-        # Use OmniGen2 by default, Flux as fallback
-        if self.config.text_to_image_model == "omnigen2":
-            try:
-                self.image_generator = OmniGen2MultiViewGenerator(self.config)
-                logger.info("Using OmniGen2 for image generation")
-            except Exception as e:
-                logger.warning(f"OmniGen2 failed to initialize: {e}, falling back to Flux")
-                self.image_generator = FluxMultiViewGenerator(self.config)
-        else:
-            self.image_generator = FluxMultiViewGenerator(self.config)
-            
-        self.hunyuan3d = Hunyuan3DProcessor(self.config)
-        self.unirig = UniRigProcessor(self.config)
-        
-        logger.info("Production pipeline ready!")
-    
-    async def create_creature(self, 
-                            prompt: str,
-                            name: str,
-                            creature_type: str = "biped") -> Dict[str, Any]:
-        """
-        Create fully rigged 3D creature from text prompt
-        """
-        
-        pipeline_start = time.time()
-        logger.info(f"Creating creature: {name}")
-        logger.info(f"Prompt: {prompt}")
-        logger.info(f"Type: {creature_type}")
-        
-        results = {
-            "name": name,
-            "prompt": prompt,
-            "creature_type": creature_type,
-            "pipeline_stages": {}
-        }
-        
-        try:
-            # Stage 1: Multi-view generation 
-            logger.info("=" * 50)
-            logger.info(f"Stage 1: Generating multi-view images with {self.config.text_to_image_model}")
-            stage_start = time.time()
-            
-            views = await self.image_generator.generate_creature_views(prompt, name)
-            
-            results["pipeline_stages"]["image_generation"] = {
-                "success": True,
-                "model": self.config.text_to_image_model,
-                "duration": time.time() - stage_start,
-                "outputs": {k: str(v) for k, v in views.items()}
-            }
-            
-            # Stage 2: 3D generation with Hunyuan3D
-            logger.info("=" * 50)
-            logger.info("Stage 2: Generating 3D model with Hunyuan3D")
-            stage_start = time.time()
-            
-            model_data = await self.hunyuan3d.generate_3d_from_views(views, name)
-            
-            results["pipeline_stages"]["hunyuan3d"] = {
-                "success": model_data["success"],
-                "duration": time.time() - stage_start,
-                "outputs": model_data
-            }
-            
-            if not model_data["success"]:
-                raise RuntimeError(f"3D generation failed: {model_data.get('error', 'Unknown error')}")
-            
-            # Stage 3: Auto-rigging with UniRig
-            logger.info("=" * 50)
-            logger.info("Stage 3: Auto-rigging with UniRig")
-            stage_start = time.time()
-            
-            rig_data = await self.unirig.auto_rig_creature(
-                model_data["mesh_path"],
-                name,
-                creature_type
-            )
-            
-            results["pipeline_stages"]["unirig"] = {
-                "success": rig_data["success"],
-                "duration": time.time() - stage_start,
-                "outputs": rig_data
-            }
-            
-            # Summary
-            total_duration = time.time() - pipeline_start
-            
-            results["success"] = True
-            results["total_duration"] = total_duration
-            results["final_outputs"] = {
-                "concept_sheet": str(views.get("concept_sheet", "")),
-                "mesh": str(model_data["mesh_path"]),
-                "texture": str(model_data.get("texture_path", model_data["mesh_path"])),
-                "rigged_model": str(rig_data["rigged_path"]),
-                "statistics": {
-                    "vertices": model_data.get("statistics", {}).get("vertices", "unknown"),
-                    "faces": model_data.get("statistics", {}).get("faces", "unknown"),
-                    "bones": rig_data.get("bone_count", "unknown"),
-                    "generation_time": model_data.get("statistics", {}).get("generation_time", 0),
-                    "model_method": model_data.get("statistics", {}).get("method", "unknown")
-                }
-            }
-            
-            # Save complete metadata
-            metadata_path = self.config.output_dir / f"{name}_complete_metadata.json"
-            with open(metadata_path, 'w') as f:
-                json.dump(results, f, indent=2)
-            
-            logger.info("=" * 50)
-            logger.info(f"Creature creation completed in {total_duration:.2f}s!")
-            logger.info(f"Final model: {rig_data['rigged_path']}")
-            
-            return results
-            
-        except Exception as e:
-            logger.error(f"Pipeline failed: {str(e)}")
-            results["success"] = False
-            results["error"] = str(e)
-            results["total_duration"] = time.time() - pipeline_start
-            return results
-
-# Create default configuration
-def create_production_config():
-    """Create production configuration file"""
-    
-    config = {
-        "flux_space": "black-forest-labs/FLUX.1-dev",
-        "sparc3d_space": None,
-        "sparc3d_repo": "https://github.com/lizhihao6/Sparc3D",
-        "unirig_repo": "https://github.com/VAST-AI-Research/UniRig",
-        "unirig_hf_model": "VAST-AI/UniRig",
-        "image_resolution": 1024,
-        "num_views": 6,
-        "guidance_scale": 3.5,
-        "inference_steps": 28,
-        "hunyuan3d_resolution": 512,
-        "target_polycount": 30000,
-        "texture_resolution": 2048,
-        "output_dir": "./digipal_3d_output",
-        "cache_dir": "./digipal_model_cache",
-        "temp_dir": "./digipal_temp",
-        "device": "cuda" if torch.cuda.is_available() else "cpu",
-        "hf_token": None
-    }
-    
-    with open("production_3d_config.json", 'w') as f:
-        json.dump(config, f, indent=2)
-    
-    logger.info("Production config created: production_3d_config.json")
-
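-# The JSON written above can be loaded back into a ProductionConfig; this is
-# a sketch assuming the dataclass accepts these keys as keyword arguments:
-#
-#     with open("production_3d_config.json") as f:
-#         config = ProductionConfig(**json.load(f))
-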
-# Example usage
-async def example_usage():
-    """Example of using the production pipeline"""
-    
-    # Create config
-    config = ProductionConfig()
-    
-    # Initialize pipeline
-    pipeline = ProductionPipeline(config)
-    
-    # Create a creature
-    result = await pipeline.create_creature(
-        prompt="adorable baby dragon with big blue eyes, small wings, chubby body, friendly expression",
-        name="BabyBlueDragon",
-        creature_type="quadruped"
-    )
-    
-    if result["success"]:
-        print("\nCreature created successfully!")
-        print(f"Rigged model: {result['final_outputs']['rigged_model']}")
-        print(f"Total time: {result['total_duration']:.2f}s")
-    else:
-        print(f"\nCreation failed: {result.get('error', 'Unknown error')}")
-
-if __name__ == "__main__":
-    # Create config
-    create_production_config()
-    
-    # Run example
-    asyncio.run(example_usage())
\ No newline at end of file
diff --git a/src/ui/state_manager.py b/src/ui/state_manager.py
deleted file mode 100644
index d7e5c2f5510425f4aa3c945ed923661029b715cb..0000000000000000000000000000000000000000
--- a/src/ui/state_manager.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import asyncio
-import json
-import aiosqlite
-from pathlib import Path
-from typing import Dict, List, Optional, Any
-from datetime import datetime, timedelta
-import logging
-import pickle
-import gzip
-
-from ..core.monster_engine import Monster
-
-class AdvancedStateManager:
-    def __init__(self, save_dir: str = "data/saves"):
-        self.save_dir = Path(save_dir)
-        self.save_dir.mkdir(parents=True, exist_ok=True)
-        
-        self.db_path = self.save_dir / "monsters.db"
-        self.backup_dir = self.save_dir / "backups"
-        self.backup_dir.mkdir(exist_ok=True)
-        
-        self.logger = logging.getLogger(__name__)
-        
-        # In-memory cache for active monsters
-        self.monster_cache: Dict[str, Monster] = {}
-        self.cache_timestamps: Dict[str, datetime] = {}
-        self.cache_timeout = timedelta(minutes=30)
-        
-        # Connection pool
-        self.db_pool = None
-        
-    async def initialize(self):
-        """Initialize the state management system"""
-        try:
-            # Create database tables
-            await self._create_tables()
-            
-            # Start background tasks
-            asyncio.create_task(self._cache_cleanup_task())
-            asyncio.create_task(self._auto_backup_task())
-            
-            self.logger.info("State manager initialized successfully")
-            
-        except Exception as e:
-            self.logger.error(f"State manager initialization failed: {e}")
-            raise
-    
-    async def _create_tables(self):
-        """Create database tables for monster storage"""
-        async with aiosqlite.connect(self.db_path) as db:
-            await db.execute("""
-                CREATE TABLE IF NOT EXISTS monsters (
-                    id TEXT PRIMARY KEY,
-                    name TEXT NOT NULL,
-                    species TEXT NOT NULL,
-                    data BLOB NOT NULL,
-                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-                    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-                    is_active BOOLEAN DEFAULT 1
-                )
-            """)
-            
-            await db.execute("""
-                CREATE TABLE IF NOT EXISTS monster_interactions (
-                    id INTEGER PRIMARY KEY AUTOINCREMENT,
-                    monster_id TEXT NOT NULL,
-                    interaction_type TEXT NOT NULL,
-                    interaction_data TEXT,
-                    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-                    FOREIGN KEY (monster_id) REFERENCES monsters (id)
-                )
-            """)
-            
-            await db.execute("""
-                CREATE TABLE IF NOT EXISTS evolution_history (
-                    id INTEGER PRIMARY KEY AUTOINCREMENT,
-                    monster_id TEXT NOT NULL,
-                    from_stage TEXT NOT NULL,
-                    to_stage TEXT NOT NULL,
-                    evolution_trigger TEXT,
-                    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-                    FOREIGN KEY (monster_id) REFERENCES monsters (id)
-                )
-            """)
-            
-            await db.execute("""
-                CREATE TABLE IF NOT EXISTS breeding_records (
-                    id INTEGER PRIMARY KEY AUTOINCREMENT,
-                    parent1_id TEXT NOT NULL,
-                    parent2_id TEXT NOT NULL,
-                    offspring_id TEXT NOT NULL,
-                    breeding_time_hours REAL,
-                    compatibility_score REAL,
-                    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-                )
-            """)
-            
-            # Create indexes for performance
-            await db.execute("CREATE INDEX IF NOT EXISTS idx_monsters_active ON monsters (is_active)")
-            await db.execute("CREATE INDEX IF NOT EXISTS idx_interactions_monster ON monster_interactions (monster_id)")
-            await db.execute("CREATE INDEX IF NOT EXISTS idx_interactions_type ON monster_interactions (interaction_type)")
-            
-            await db.commit()
-    
-    async def save_monster(self, monster: Monster) -> bool:
-        """Save monster to persistent storage"""
-        try:
-            # Update cache
-            self.monster_cache[monster.id] = monster
-            self.cache_timestamps[monster.id] = datetime.now()
-            
-            # Serialize monster data with compression
-            monster_data = gzip.compress(pickle.dumps(monster.dict()))
-            
-            # Save to database
-            async with aiosqlite.connect(self.db_path) as db:
-                await db.execute("""
-                    INSERT OR REPLACE INTO monsters 
-                    (id, name, species, data, updated_at) 
-                    VALUES (?, ?, ?, ?, ?)
-                """, (
-                    monster.id,
-                    monster.name,
-                    monster.species,
-                    monster_data,
-                    datetime.now().isoformat()
-                ))
-                await db.commit()
-            
-            self.logger.debug(f"Saved monster {monster.name} ({monster.id})")
-            return True
-            
-        except Exception as e:
-            self.logger.error(f"Failed to save monster {monster.id}: {e}")
-            return False
-    
-    async def load_monster(self, monster_id: str) -> Optional[Monster]:
-        """Load monster from storage"""
-        try:
-            # Check cache first
-            if monster_id in self.monster_cache:
-                cache_time = self.cache_timestamps.get(monster_id)
-                if cache_time and (datetime.now() - cache_time) < self.cache_timeout:
-                    return self.monster_cache[monster_id]
-            
-            # Load from database
-            async with aiosqlite.connect(self.db_path) as db:
-                async with db.execute(
-                    "SELECT data FROM monsters WHERE id = ? AND is_active = 1", 
-                    (monster_id,)
-                ) as cursor:
-                    row = await cursor.fetchone()
-                    
-                    if not row:
-                        return None
-                    
-                    # Decompress and deserialize
-                    monster_data = pickle.loads(gzip.decompress(row[0]))
-                    monster = Monster(**monster_data)
-                    
-                    # Update cache
-                    self.monster_cache[monster_id] = monster
-                    self.cache_timestamps[monster_id] = datetime.now()
-                    
-                    self.logger.debug(f"Loaded monster {monster.name} ({monster_id})")
-                    return monster
-                    
-        except Exception as e:
-            self.logger.error(f"Failed to load monster {monster_id}: {e}")
-            return None
-    
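-    # Note: pickle is convenient but executes arbitrary code on load, so the
-    # save/load round-trip above is only safe for files this application
-    # produced itself. A hardened variant could serialize via JSON instead
-    # (a sketch, assuming Monster fields are JSON-representable):
-    #
-    #     blob = gzip.compress(json.dumps(monster.dict(), default=str).encode())
-    #     monster = Monster(**json.loads(gzip.decompress(blob)))
-    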
-    async def list_monsters(self, active_only: bool = True) -> List[Dict[str, Any]]:
-        """List all monsters with basic information"""
-        try:
-            where_clause = "WHERE is_active = 1" if active_only else ""
-            
-            async with aiosqlite.connect(self.db_path) as db:
-                async with db.execute(f"""
-                    SELECT id, name, species, created_at, updated_at 
-                    FROM monsters {where_clause} 
-                    ORDER BY updated_at DESC
-                """) as cursor:
-                    
-                    monsters = []
-                    async for row in cursor:
-                        monsters.append({
-                            "id": row[0],
-                            "name": row[1],
-                            "species": row[2],
-                            "created_at": row[3],
-                            "updated_at": row[4]
-                        })
-                    
-                    return monsters
-                    
-        except Exception as e:
-            self.logger.error(f"Failed to list monsters: {e}")
-            return []
-    
-    async def delete_monster(self, monster_id: str, soft_delete: bool = True) -> bool:
-        """Delete monster from storage"""
-        try:
-            # Remove from cache
-            self.monster_cache.pop(monster_id, None)
-            self.cache_timestamps.pop(monster_id, None)
-            
-            async with aiosqlite.connect(self.db_path) as db:
-                if soft_delete:
-                    # Soft delete - mark as inactive
-                    await db.execute(
-                        "UPDATE monsters SET is_active = 0 WHERE id = ?",
-                        (monster_id,)
-                    )
-                else:
-                    # Hard delete - remove completely
-                    await db.execute("DELETE FROM monsters WHERE id = ?", (monster_id,))
-                    await db.execute("DELETE FROM monster_interactions WHERE monster_id = ?", (monster_id,))
-                    await db.execute("DELETE FROM evolution_history WHERE monster_id = ?", (monster_id,))
-                
-                await db.commit()
-            
-            self.logger.info(f"Deleted monster {monster_id} (soft={soft_delete})")
-            return True
-            
-        except Exception as e:
-            self.logger.error(f"Failed to delete monster {monster_id}: {e}")
-            return False
-    
-    async def log_interaction(self, monster_id: str, interaction_type: str, interaction_data: Optional[Dict[str, Any]] = None):
-        """Log monster interaction for analytics"""
-        try:
-            data_json = json.dumps(interaction_data) if interaction_data else None
-            
-            async with aiosqlite.connect(self.db_path) as db:
-                await db.execute("""
-                    INSERT INTO monster_interactions 
-                    (monster_id, interaction_type, interaction_data) 
-                    VALUES (?, ?, ?)
-                """, (monster_id, interaction_type, data_json))
-                await db.commit()
-                
-        except Exception as e:
-            self.logger.error(f"Failed to log interaction: {e}")
-    
-    async def log_evolution(self, monster_id: str, from_stage: str, to_stage: str, trigger: str):
-        """Log monster evolution event"""
-        try:
-            async with aiosqlite.connect(self.db_path) as db:
-                await db.execute("""
-                    INSERT INTO evolution_history 
-                    (monster_id, from_stage, to_stage, evolution_trigger) 
-                    VALUES (?, ?, ?, ?)
-                """, (monster_id, from_stage, to_stage, trigger))
-                await db.commit()
-                
-        except Exception as e:
-            self.logger.error(f"Failed to log evolution: {e}")
-    
-    async def get_monster_statistics(self, monster_id: str) -> Dict[str, Any]:
-        """Get comprehensive statistics for a monster"""
-        try:
-            async with aiosqlite.connect(self.db_path) as db:
-                # Get interaction counts
-                async with db.execute("""
-                    SELECT interaction_type, COUNT(*) as count 
-                    FROM monster_interactions 
-                    WHERE monster_id = ? 
-                    GROUP BY interaction_type
-                """, (monster_id,)) as cursor:
-                    interactions = {row[0]: row[1] async for row in cursor}
-                
-                # Get evolution history
-                async with db.execute("""
-                    SELECT from_stage, to_stage, evolution_trigger, timestamp 
-                    FROM evolution_history 
-                    WHERE monster_id = ? 
-                    ORDER BY timestamp
-                """, (monster_id,)) as cursor:
-                    evolutions = [
-                        {
-                            "from": row[0],
-                            "to": row[1], 
-                            "trigger": row[2],
-                            "timestamp": row[3]
-                        } async for row in cursor
-                    ]
-                
-                return {
-                    "interactions": interactions,
-                    "evolutions": evolutions,
-                    "total_interactions": sum(interactions.values()),
-                    "evolution_count": len(evolutions)
-                }
-                
-        except Exception as e:
-            self.logger.error(f"Failed to get statistics for {monster_id}: {e}")
-            return {}
-    
-    async def create_backup(self) -> str:
-        """Create a compressed backup of all monster data"""
-        try:
-            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-            backup_file = self.backup_dir / f"monsters_backup_{timestamp}.gz"
-            
-            # Export all active monsters
-            monsters = await self.list_monsters(active_only=True)
-            backup_data = {
-                "timestamp": timestamp,
-                "monsters": []
-            }
-            
-            for monster_info in monsters:
-                monster = await self.load_monster(monster_info["id"])
-                if monster:
-                    backup_data["monsters"].append(monster.dict())
-            
-            # Compress and save
-            with gzip.open(backup_file, 'wt') as f:
-                json.dump(backup_data, f, default=str, indent=2)
-            
-            self.logger.info(f"Created backup: {backup_file}")
-            return str(backup_file)
-            
-        except Exception as e:
-            self.logger.error(f"Backup creation failed: {e}")
-            return ""
-    
-    async def restore_backup(self, backup_file: str) -> bool:
-        """Restore monsters from backup file"""
-        try:
-            backup_path = Path(backup_file)
-            if not backup_path.exists():
-                return False
-            
-            with gzip.open(backup_path, 'rt') as f:
-                backup_data = json.load(f)
-            
-            restored_count = 0
-            for monster_data in backup_data.get("monsters", []):
-                try:
-                    monster = Monster(**monster_data)
-                    if await self.save_monster(monster):
-                        restored_count += 1
-                except Exception as e:
-                    self.logger.warning(f"Failed to restore monster: {e}")
-            
-            self.logger.info(f"Restored {restored_count} monsters from backup")
-            return restored_count > 0
-            
-        except Exception as e:
-            self.logger.error(f"Backup restoration failed: {e}")
-            return False
-    
-    async def _cache_cleanup_task(self):
-        """Background task to clean up expired cache entries"""
-        while True:
-            try:
-                current_time = datetime.now()
-                expired_keys = []
-                
-                for monster_id, timestamp in self.cache_timestamps.items():
-                    if current_time - timestamp > self.cache_timeout:
-                        expired_keys.append(monster_id)
-                
-                for key in expired_keys:
-                    self.monster_cache.pop(key, None)
-                    self.cache_timestamps.pop(key, None)
-                
-                if expired_keys:
-                    self.logger.debug(f"Cleaned up {len(expired_keys)} expired cache entries")
-                
-                # Sleep for 10 minutes before next cleanup
-                await asyncio.sleep(600)
-                
-            except Exception as e:
-                self.logger.error(f"Cache cleanup task failed: {e}")
-                await asyncio.sleep(60)  # Shorter sleep on error
-    
-    async def _auto_backup_task(self):
-        """Background task for automatic backups"""
-        while True:
-            try:
-                # Create backup every 6 hours
-                await asyncio.sleep(21600)  # 6 hours
-                
-                backup_file = await self.create_backup()
-                if backup_file:
-                    # Clean up old backups (keep last 10)
-                    await self._cleanup_old_backups()
-                
-            except Exception as e:
-                self.logger.error(f"Auto backup task failed: {e}")
-                await asyncio.sleep(3600)  # Retry in 1 hour on error
-    
-    async def _cleanup_old_backups(self, keep_count: int = 10):
-        """Clean up old backup files"""
-        try:
-            backup_files = list(self.backup_dir.glob("monsters_backup_*.gz"))
-            backup_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)
-            
-            for old_backup in backup_files[keep_count:]:
-                old_backup.unlink()
-                self.logger.debug(f"Removed old backup: {old_backup}")
-                
-        except Exception as e:
-            self.logger.error(f"Backup cleanup failed: {e}")
-    
-    def get_cache_stats(self) -> Dict[str, Any]:
-        """Get cache performance statistics"""
-        return {
-            "cached_monsters": len(self.monster_cache),
-            "cache_timeout_minutes": self.cache_timeout.total_seconds() / 60,
-            "oldest_cache_entry": min(self.cache_timestamps.values()) if self.cache_timestamps else None,
-            "newest_cache_entry": max(self.cache_timestamps.values()) if self.cache_timestamps else None
-        }
\ No newline at end of file
diff --git a/src/ui/streamlit_interface.py b/src/ui/streamlit_interface.py
deleted file mode 100644
index 3b11e3e5d5e2de31f4347a07bbf0b44cce2c2ac7..0000000000000000000000000000000000000000
--- a/src/ui/streamlit_interface.py
+++ /dev/null
@@ -1,565 +0,0 @@
-"""
-Streamlit Interface for DigiPal - Modern AI Monster Companion
-Replaces Gradio with Streamlit for better user experience
-"""
-
-import streamlit as st
-import logging
-import requests
-from datetime import datetime
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# API Configuration
-API_BASE_URL = "http://localhost:7861"  # FastAPI backend
-
-class StreamlitDigiPalInterface:
-    """Modern Streamlit interface for DigiPal"""
-    
-    def __init__(self):
-        self.logger = logging.getLogger(__name__)
-        
-        # Initialize session state
-        if 'current_monster' not in st.session_state:
-            st.session_state.current_monster = None
-        if 'monster_stats' not in st.session_state:
-            st.session_state.monster_stats = {}
-        if 'conversation_history' not in st.session_state:
-            st.session_state.conversation_history = []
-        if 'available_monsters' not in st.session_state:
-            st.session_state.available_monsters = []
-            
-    def run(self):
-        """Main Streamlit application"""
-        
-        # Page configuration
-        st.set_page_config(
-            page_title="DigiPal - AI Monster Companion",
-            page_icon="🐉",
-            layout="wide",
-            initial_sidebar_state="expanded"
-        )
-        
-        # Custom CSS for cyberpunk theme
-        self._apply_custom_css()
-        
-        # Header with cyberpunk styling
-        st.markdown('<h1 class="digipal-title">🐉 DigiPal</h1>', unsafe_allow_html=True)
-        st.markdown('<p style="text-align: center; font-family: Rajdhani, sans-serif; font-size: 1.2rem; color: #00ffff; margin-top: -1rem;">Advanced AI Monster Companion with 3D Generation</p>', unsafe_allow_html=True)
-        
-        # Sidebar for monster management
-        self._render_sidebar()
-        
-        # Main content area
-        if st.session_state.current_monster:
-            self._render_monster_interface()
-        else:
-            self._render_welcome_screen()
-    
-    def _apply_custom_css(self):
-        """Apply custom CSS for cyberpunk theme"""
-        st.markdown("""
-        <style>
-        /* Import Google Fonts */
-        @import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700;900&family=Rajdhani:wght@300;400;500;600;700&display=swap');
-        
-        /* Main background */
-        .main {
-            background: linear-gradient(135deg, #0a0a0a 0%, #1a0d2e 25%, #16213e 50%, #0f3460 75%, #0e4b99 100%);
-            color: #e0e0e0;
-            font-family: 'Rajdhani', sans-serif;
-        }
-        
-        /* Headers */
-        h1, h2, h3 {
-            font-family: 'Orbitron', monospace;
-            color: #00ffff;
-            text-shadow: 0 0 20px #00ffff40;
-        }
-        
-        /* Sidebar (auto-generated class name; specific to the installed
-           Streamlit version and may change between releases) */
-        .css-1d391kg {
-            background: linear-gradient(180deg, #1a1a2e 0%, #16213e 100%);
-            border-right: 2px solid #00ffff40;
-        }
-        
-        /* Buttons */
-        .stButton > button {
-            background: linear-gradient(45deg, #ff0080, #00ffff);
-            color: #0a0a0a;
-            border: none;
-            border-radius: 25px;
-            padding: 0.75rem 1.5rem;
-            font-weight: bold;
-            font-family: 'Orbitron', monospace;
-            font-size: 0.9rem;
-            box-shadow: 0 0 20px rgba(255, 0, 128, 0.3);
-            transition: all 0.3s ease;
-            text-transform: uppercase;
-            letter-spacing: 1px;
-        }
-        .stButton > button:hover {
-            transform: translateY(-3px) scale(1.05);
-            box-shadow: 0 0 30px rgba(0, 255, 255, 0.6);
-            background: linear-gradient(45deg, #00ffff, #ff0080);
-        }
-        
-        /* Input fields */
-        .stTextInput > div > div > input, .stTextArea > div > div > textarea, .stSelectbox > div > div > select {
-            background: rgba(0, 0, 0, 0.8);
-            border: 2px solid #00ffff40;
-            border-radius: 10px;
-            color: #e0e0e0;
-            font-family: 'Rajdhani', sans-serif;
-        }
-        .stTextInput > div > div > input:focus, .stTextArea > div > div > textarea:focus {
-            border-color: #00ffff;
-            box-shadow: 0 0 15px #00ffff40;
-        }
-        
-        /* Monster stats container */
-        .monster-stats {
-            background: linear-gradient(135deg, rgba(0, 255, 255, 0.1) 0%, rgba(255, 0, 128, 0.1) 100%);
-            border-radius: 15px;
-            padding: 1.5rem;
-            backdrop-filter: blur(10px);
-            border: 2px solid rgba(0, 255, 255, 0.3);
-            box-shadow: 0 0 30px rgba(0, 255, 255, 0.2);
-        }
-        
-        /* Progress bars */
-        .stProgress > div > div > div {
-            background: linear-gradient(90deg, #ff0080 0%, #00ffff 100%);
-        }
-        
-        /* Metrics */
-        [data-testid="metric-container"] {
-            background: rgba(0, 0, 0, 0.6);
-            border: 1px solid #00ffff40;
-            border-radius: 10px;
-            padding: 1rem;
-            box-shadow: 0 0 15px rgba(0, 255, 255, 0.1);
-        }
-        
-        /* Chat messages */
-        .stChatMessage {
-            background: rgba(0, 0, 0, 0.7);
-            border-radius: 15px;
-            border-left: 4px solid #00ffff;
-            margin: 0.5rem 0;
-        }
-        
-        /* Success/Error messages */
-        .stSuccess {
-            background: rgba(0, 255, 0, 0.1);
-            border: 1px solid #00ff00;
-            color: #00ff00;
-        }
-        .stError {
-            background: rgba(255, 0, 0, 0.1);
-            border: 1px solid #ff0000;
-            color: #ff6666;
-        }
-        
-        /* Neon text */
-        .neon-text {
-            color: #00ffff;
-            text-shadow: 0 0 10px #00ffff, 0 0 20px #00ffff, 0 0 30px #00ffff;
-            font-family: 'Orbitron', monospace;
-            font-weight: 700;
-        }
-        
-        /* DigiPal title effect */
-        .digipal-title {
-            background: linear-gradient(45deg, #ff0080, #00ffff, #ff0080);
-            background-size: 200% 200%;
-            -webkit-background-clip: text;
-            -webkit-text-fill-color: transparent;
-            animation: neon-glow 2s ease-in-out infinite alternate;
-            font-family: 'Orbitron', monospace;
-            font-weight: 900;
-            font-size: 3rem;
-            text-align: center;
-            margin: 1rem 0;
-        }
-        
-        @keyframes neon-glow {
-            from { background-position: 0% 50%; }
-            to { background-position: 100% 50%; }
-        }
-        
-        /* Holographic effect for containers */
-        .holo-container {
-            background: linear-gradient(135deg, 
-                rgba(0, 255, 255, 0.1) 0%, 
-                rgba(0, 255, 255, 0.05) 25%, 
-                rgba(255, 0, 128, 0.05) 50%, 
-                rgba(255, 0, 128, 0.1) 75%, 
-                rgba(0, 255, 255, 0.1) 100%);
-            border: 2px solid;
-            border-image: linear-gradient(45deg, #00ffff, #ff0080, #00ffff) 1;
-            border-radius: 15px;
-            padding: 1.5rem;
-            backdrop-filter: blur(15px);
-            box-shadow: 
-                0 0 20px rgba(0, 255, 255, 0.3),
-                inset 0 0 20px rgba(255, 0, 128, 0.1);
-        }
-        </style>
-        """, unsafe_allow_html=True)
-    
-    def _render_sidebar(self):
-        """Render sidebar with monster management"""
-        with st.sidebar:
-            st.header("🎮 Monster Management")
-            
-            # Load available monsters
-            if st.button("🔄 Refresh Monsters"):
-                self._load_available_monsters()
-            
-            # Monster selection
-            if st.session_state.available_monsters:
-                selected_monster = st.selectbox(
-                    "Select Monster:",
-                    options=["None"] + [m["name"] for m in st.session_state.available_monsters],
-                    index=0
-                )
-                
-                if selected_monster != "None":
-                    if st.button("🐾 Load Monster"):
-                        self._load_monster(selected_monster)
-            
-            # Create new monster
-            st.subheader("🆕 Create New Monster")
-            with st.form("create_monster"):
-                new_name = st.text_input("Monster Name:")
-                personality = st.selectbox(
-                    "Personality:",
-                    ["FRIENDLY", "ENERGETIC", "CALM", "CURIOUS", "BRAVE"]
-                )
-                
-                if st.form_submit_button("🥚 Create Monster"):
-                    self._create_monster(new_name, personality)
-            
-            # Current monster info
-            if st.session_state.current_monster:
-                st.subheader(f"🐉 {st.session_state.current_monster['name']}")
-                st.write(f"**Stage:** {st.session_state.current_monster.get('stage', 'Unknown')}")
-                st.write(f"**Personality:** {st.session_state.current_monster.get('personality', 'Unknown')}")
-    
-    def _render_welcome_screen(self):
-        """Render welcome screen when no monster is selected"""
-        col1, col2 = st.columns([2, 1])
-        
-        with col1:
-            st.markdown("""
-            <div class="holo-container">
-            <h2 class="neon-text">Welcome to DigiPal! 🐉</h2>
-            
-            <p style="font-size: 1.3rem; color: #e0e0e0; margin-bottom: 1.5rem;">
-            <strong>The most advanced AI monster companion experience</strong>
-            </p>
-            
-            <h3 style="color: #ff0080;">🚀 Revolutionary Features:</h3>
-            <ul style="font-size: 1.1rem; line-height: 1.8;">
-            <li>🤖 <strong style="color: #00ffff;">Advanced AI Conversations</strong> with Qwen 2.5</li>
-            <li>🎤 <strong style="color: #00ffff;">Voice Interaction</strong> with Kyutai STT-2.6b</li>
-            <li>🎨 <strong style="color: #00ffff;">3D Model Generation</strong> with OmniGen2 → Hunyuan3D → UniRig</li>
-            <li>📊 <strong style="color: #00ffff;">Complex Care System</strong> inspired by Digimon World</li>
-            <li>🧬 <strong style="color: #00ffff;">Dynamic Evolution</strong> based on care quality</li>
-            <li>💬 <strong style="color: #00ffff;">Personality-driven Responses</strong></li>
-            </ul>
-            
-            <h3 style="color: #ff0080; margin-top: 2rem;">⚡ Getting Started:</h3>
-            <ol style="font-size: 1.1rem; line-height: 1.8;">
-            <li><span style="color: #00ffff;">Create a new monster</span> in the sidebar</li>
-            <li><span style="color: #00ffff;">Talk to your monster</span> and watch it grow</li>
-            <li><span style="color: #00ffff;">Generate a unique 3D model</span></li>
-            <li><span style="color: #00ffff;">Care for your digital companion!</span></li>
-            </ol>
-            </div>
-            """, unsafe_allow_html=True)
-        
-        with col2:
-            st.markdown("""
-            <div class="holo-container" style="text-align: center;">
-            <h3 class="neon-text">🔮 Your AI Companion Awaits</h3>
-            <div style="background: linear-gradient(45deg, #ff0080, #00ffff); 
-                        border-radius: 20px; 
-                        padding: 2rem; 
-                        margin: 1rem 0;
-                        box-shadow: 0 0 30px rgba(0, 255, 255, 0.5);">
-                <p style="font-size: 4rem; margin: 0; animation: neon-glow 2s ease-in-out infinite alternate;">🐉</p>
-                <p style="font-size: 1.2rem; margin: 0.5rem 0; color: #0a0a0a; font-weight: bold;">DigiPal</p>
-            </div>
-            <p style="color: #e0e0e0; font-style: italic;">Ready to create your perfect digital companion?</p>
-            </div>
-            """, unsafe_allow_html=True)
-    
-    def _render_monster_interface(self):
-        """Render main monster interaction interface"""
-        
-        # Main layout
-        col1, col2 = st.columns([2, 1])
-        
-        with col1:
-            # Conversation area
-            self._render_conversation_area()
-            
-            # Action buttons
-            self._render_action_buttons()
-        
-        with col2:
-            # Monster stats and 3D model
-            self._render_monster_stats()
-            self._render_3d_model_section()
-    
-    def _render_conversation_area(self):
-        """Render conversation interface"""
-        st.subheader("💬 Talk to Your Monster")
-        
-        # Chat history
-        chat_container = st.container()
-        with chat_container:
-            for message in st.session_state.conversation_history:
-                if message["role"] == "user":
-                    st.chat_message("user").write(message["content"])
-                else:
-                    st.chat_message("assistant").write(message["content"])
-        
-        # Chat input
-        user_input = st.chat_input("Say something to your monster...")
-        
-        if user_input:
-            self._send_message(user_input)
-    
-    def _render_action_buttons(self):
-        """Render care action buttons"""
-        st.subheader("🎮 Care Actions")
-        
-        col1, col2, col3 = st.columns(3)
-        
-        with col1:
-            if st.button("🍖 Feed"):
-                self._perform_action("feed")
-            if st.button("🏃 Train"):
-                self._perform_action("train")
-        
-        with col2:
-            if st.button("🎲 Play"):
-                self._perform_action("play")
-            if st.button("🧼 Clean"):
-                self._perform_action("clean")
-        
-        with col3:
-            if st.button("💊 Heal"):
-                self._perform_action("heal")
-            if st.button("😴 Rest"):
-                self._perform_action("rest")
-    
-    def _render_monster_stats(self):
-        """Render monster statistics"""
-        st.subheader("📊 Monster Stats")
-        
-        if 'stats' in st.session_state.current_monster:
-            stats = st.session_state.current_monster['stats']
-            
-            # Create visual stat bars
-            for stat_name, value in stats.items():
-                if isinstance(value, (int, float)):
-                    # Normalize to 0-100 for progress bar
-                    normalized_value = min(100, max(0, value))
-                    st.metric(
-                        label=stat_name.title(),
-                        value=f"{value:.1f}",
-                        delta=None
-                    )
-                    st.progress(normalized_value / 100)
-        else:
-            st.info("Load a monster to see stats")
-    
-    def _render_3d_model_section(self):
-        """Render 3D model generation section"""
-        st.subheader("🎨 3D Model Generation")
-        
-        # Model display area
-        if st.session_state.current_monster and st.session_state.current_monster.get('model_url'):
-            st.success("3D Model Ready!")
-            st.write(f"Model: {st.session_state.current_monster['model_url']}")
-        else:
-            st.info("No 3D model generated yet")
-        
-        # Generation controls
-        with st.form("generate_3d"):
-            description = st.text_area(
-                "Custom Description (optional):",
-                placeholder="A cute dragon with blue scales and friendly eyes..."
-            )
-            
-            if st.form_submit_button("🎨 Generate 3D Model"):
-                self._generate_3d_model(description)
-    
-    def _load_available_monsters(self):
-        """Load list of available monsters from API"""
-        try:
-            response = requests.get(f"{API_BASE_URL}/api/monsters", timeout=5)
-            if response.status_code == 200:
-                data = response.json()
-                st.session_state.available_monsters = data.get("monsters", [])
-                st.success(f"Found {len(st.session_state.available_monsters)} monsters")
-            else:
-                st.error("Failed to load monsters")
-        except requests.exceptions.RequestException as e:
-            st.warning("🔧 Backend not connected")
-            st.info("💡 To enable full functionality, start backend: `python app.py`")
-            # Add some demo monsters for UI preview
-            st.session_state.available_monsters = [
-                {"name": "Demo Dragon", "id": "demo1", "stage": "Adult"},
-                {"name": "Cyber Wolf", "id": "demo2", "stage": "Champion"}
-            ]
-    
-    def _load_monster(self, monster_name: str):
-        """Load a specific monster"""
-        try:
-            # Find monster by name
-            monster_data = None
-            for monster in st.session_state.available_monsters:
-                if monster["name"] == monster_name:
-                    monster_data = monster
-                    break
-            
-            if monster_data:
-                # Load full monster data from API
-                response = requests.get(f"{API_BASE_URL}/api/monsters/{monster_data['id']}", timeout=10)
-                if response.status_code == 200:
-                    st.session_state.current_monster = response.json()
-                    st.session_state.conversation_history = st.session_state.current_monster.get('conversation_history', [])
-                    st.success(f"Loaded {monster_name}!")
-                    st.rerun()
-                else:
-                    st.error("Failed to load monster details")
-        except Exception as e:
-            st.error(f"Error loading monster: {str(e)}")
-    
-    def _create_monster(self, name: str, personality: str):
-        """Create a new monster"""
-        if not name:
-            st.error("Please enter a monster name")
-            return
-        
-        try:
-            response = requests.post(
-                f"{API_BASE_URL}/api/monsters",
-                json={"name": name, "personality": personality},
-                timeout=10
-            )
-            if response.status_code == 200:
-                monster_data = response.json()
-                st.session_state.current_monster = monster_data
-                st.session_state.conversation_history = []
-                st.success(f"Created {name}!")
-                st.rerun()
-            else:
-                st.error("Failed to create monster")
-        except Exception as e:
-            st.error(f"Error creating monster: {str(e)}")
-    
-    def _send_message(self, message: str):
-        """Send message to monster"""
-        if not st.session_state.current_monster:
-            return
-        
-        try:
-            # Add user message to history
-            st.session_state.conversation_history.append({
-                "role": "user",
-                "content": message,
-                "timestamp": datetime.now().isoformat()
-            })
-            
-            # Send to API
-            response = requests.post(
-                f"{API_BASE_URL}/api/monsters/{st.session_state.current_monster['id']}/talk",
-                json={"message": message},
-                timeout=30
-            )
-            
-            if response.status_code == 200:
-                data = response.json()
-                # Add AI response to history
-                st.session_state.conversation_history.append({
-                    "role": "assistant",
-                    "content": data["response"],
-                    "timestamp": datetime.now().isoformat()
-                })
-                
-                # Update monster stats
-                st.session_state.current_monster['stats'] = data.get("stats", {})
-                st.rerun()
-            else:
-                st.error("Failed to send message")
-        except Exception as e:
-            st.error(f"Error sending message: {str(e)}")
-    
-    def _perform_action(self, action: str):
-        """Perform care action on monster"""
-        if not st.session_state.current_monster:
-            return
-        
-        try:
-            response = requests.post(
-                f"{API_BASE_URL}/api/monsters/{st.session_state.current_monster['id']}/action",
-                json={"action": action},
-                timeout=10
-            )
-            
-            if response.status_code == 200:
-                data = response.json()
-                st.session_state.current_monster['stats'] = data.get("stats", {})
-                st.success(f"Performed {action}!")
-                st.rerun()
-            else:
-                st.error(f"Failed to perform {action}")
-        except Exception as e:
-            st.error(f"Error performing {action}: {str(e)}")
-    
-    def _generate_3d_model(self, description: str = ""):
-        """Generate 3D model for monster"""
-        if not st.session_state.current_monster:
-            return
-        
-        try:
-            with st.spinner("Generating 3D model... This may take a few minutes."):
-                response = requests.post(
-                    f"{API_BASE_URL}/api/monsters/{st.session_state.current_monster['id']}/generate-3d",
-                    json={"description": description},
-                    timeout=600  # 3D generation can take several minutes
-                )
-                
-                if response.status_code == 200:
-                    data = response.json()
-                    if data["success"]:
-                        st.session_state.current_monster['model_url'] = data["model_url"]
-                        st.success("3D model generated successfully!")
-                        st.rerun()
-                    else:
-                        st.error("3D generation failed")
-                else:
-                    st.error("Failed to generate 3D model")
-        except Exception as e:
-            st.error(f"Error generating 3D model: {str(e)}")
-
-def main():
-    """Main entry point for Streamlit app"""
-    interface = StreamlitDigiPalInterface()
-    interface.run()
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/src/utils/performance_tracker.py b/src/utils/performance_tracker.py
deleted file mode 100644
index 4d6a73e9650a39bcd40815837d5c3057ba73739f..0000000000000000000000000000000000000000
--- a/src/utils/performance_tracker.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import logging
-import time
-import psutil
-import torch
-from typing import Dict, Any, List
-from datetime import datetime
-import asyncio
-
-class PerformanceTracker:
-    def __init__(self):
-        self.logger = logging.getLogger(__name__)
-        self.metrics = {
-            "inference_times": [],
-            "memory_usage": [],
-            "cpu_usage": [],
-            "gpu_usage": [],
-            "total_requests": 0,
-            "successful_requests": 0,
-            "failed_requests": 0
-        }
-        self.start_time = time.time()
-        
-    async def initialize(self):
-        """Initialize performance tracking"""
-        self.logger.info("Performance tracker initialized")
-        
-        # Start background monitoring
-        asyncio.create_task(self._monitor_resources())
-    
-    async def _monitor_resources(self):
-        """Background task to monitor system resources"""
-        while True:
-            try:
-                # CPU usage
-                cpu_percent = psutil.cpu_percent(interval=1)
-                self.metrics["cpu_usage"].append({
-                    "timestamp": datetime.now().isoformat(),
-                    "value": cpu_percent
-                })
-                
-                # Memory usage
-                memory = psutil.virtual_memory()
-                self.metrics["memory_usage"].append({
-                    "timestamp": datetime.now().isoformat(),
-                    "value": memory.percent
-                })
-                
-                # GPU usage (if available)
-                if torch.cuda.is_available():
-                    try:
-                        allocated = torch.cuda.memory_allocated()
-                        max_allocated = torch.cuda.max_memory_allocated()
-                        
-                        # Avoid division by zero
-                        if max_allocated > 0:
-                            gpu_memory = allocated / max_allocated
-                        else:
-                            gpu_memory = 0.0
-                            
-                        self.metrics["gpu_usage"].append({
-                            "timestamp": datetime.now().isoformat(),
-                            "value": gpu_memory * 100
-                        })
-                    except Exception as gpu_error:
-                        self.logger.warning(f"GPU monitoring failed: {gpu_error}")
-                
-                # Keep only last 100 measurements
-                for metric in ["cpu_usage", "memory_usage", "gpu_usage"]:
-                    if len(self.metrics[metric]) > 100:
-                        self.metrics[metric] = self.metrics[metric][-100:]
-                
-                await asyncio.sleep(30)  # Monitor every 30 seconds
-                
-            except Exception as e:
-                self.logger.error(f"Resource monitoring failed: {e}")
-                await asyncio.sleep(60)
-    
-    def track_inference(self, duration: float):
-        """Track inference time"""
-        self.metrics["inference_times"].append(duration)
-        
-        # Keep only last 100 measurements
-        if len(self.metrics["inference_times"]) > 100:
-            self.metrics["inference_times"] = self.metrics["inference_times"][-100:]
-    
-    def track_request(self, success: bool):
-        """Track request outcome"""
-        self.metrics["total_requests"] += 1
-        if success:
-            self.metrics["successful_requests"] += 1
-        else:
-            self.metrics["failed_requests"] += 1
-    
-    def get_summary(self) -> Dict[str, Any]:
-        """Get performance summary"""
-        uptime_seconds = time.time() - self.start_time
-        
-        avg_inference = sum(self.metrics["inference_times"]) / len(self.metrics["inference_times"]) if self.metrics["inference_times"] else 0
-        
-        return {
-            "uptime_hours": uptime_seconds / 3600,
-            "total_requests": self.metrics["total_requests"],
-            "success_rate": self.metrics["successful_requests"] / self.metrics["total_requests"] if self.metrics["total_requests"] > 0 else 0,
-            "average_inference_time": avg_inference,
-            "current_cpu_usage": self.metrics["cpu_usage"][-1]["value"] if self.metrics["cpu_usage"] else 0,
-            "current_memory_usage": self.metrics["memory_usage"][-1]["value"] if self.metrics["memory_usage"] else 0
-        }
\ No newline at end of file
diff --git a/streamlit_app.py b/streamlit_app.py
deleted file mode 100644
index 61d14d785ee9eac4916a81aa1f54614be73f44aa..0000000000000000000000000000000000000000
--- a/streamlit_app.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python3
-"""
-DigiPal Streamlit App - HuggingFace Spaces Entry Point
-Unified Streamlit application that includes embedded FastAPI functionality
-"""
-
-import streamlit as st
-import asyncio
-import threading
-import time
-import logging
-import sys
-import os
-from pathlib import Path
-
-# Add src to path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Import our Streamlit interface
-from src.ui.streamlit_interface import StreamlitDigiPalInterface
-
-def start_background_services():
-    """Start background services needed for DigiPal"""
-    try:
-        # Create necessary directories
-        os.makedirs("data/saves", exist_ok=True)
-        os.makedirs("data/models", exist_ok=True)
-        os.makedirs("data/cache", exist_ok=True)
-        os.makedirs("logs", exist_ok=True)
-        
-        # For Spaces deployment, we'll run a simplified version
-        # that doesn't require separate FastAPI server
-        logger.info("DigiPal background services initialized")
-        
-    except Exception as e:
-        logger.error(f"Failed to initialize background services: {e}")
-
-def main():
-    """Main Streamlit application entry point"""
-    
-    # Initialize background services
-    if 'services_initialized' not in st.session_state:
-        start_background_services()
-        st.session_state.services_initialized = True
-    
-    # Create and run the interface
-    interface = StreamlitDigiPalInterface()
-    interface.run()
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/test_import.py b/test_import.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd69fdc8a4409c13550df9ba4a3ef9c413bb41e1
--- /dev/null
+++ b/test_import.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+"""Test script to verify all imports work correctly"""
+
+import sys
+import importlib
+
+def test_imports():
+    """Test that all required modules can be imported"""
+    
+    modules_to_test = [
+        # External dependencies
+        ('gradio', 'Gradio UI framework'),
+        ('torch', 'PyTorch'),
+        ('transformers', 'Transformers library'),
+        ('diffusers', 'Diffusers library'),
+        ('PIL', 'Pillow image library'),
+        ('numpy', 'NumPy'),
+        ('trimesh', 'Trimesh 3D library'),
+        
+        # Internal modules
+        ('core.ai_pipeline', 'AI Pipeline'),
+        ('core.game_mechanics', 'Game Mechanics'),
+        ('core.state_manager', 'State Manager'),
+        ('core.auth_manager', 'Auth Manager'),
+        ('models.stt_processor', 'STT Processor'),
+        ('models.text_generator', 'Text Generator'),
+        ('models.image_generator', 'Image Generator'),
+        ('models.model_3d_generator', '3D Model Generator'),
+        ('models.rigging_processor', 'Rigging Processor'),
+        ('ui.themes', 'UI Themes'),
+        ('ui.interfaces', 'UI Interfaces'),
+        ('utils.fallbacks', 'Fallback Manager'),
+        ('utils.caching', 'Model Cache'),
+    ]
+    
+    print("🔍 Testing imports...\n")
+    
+    failed = []
+    
+    for module_name, description in modules_to_test:
+        try:
+            importlib.import_module(module_name)
+            print(f"✅ {description} ({module_name})")
+        except ImportError as e:
+            print(f"❌ {description} ({module_name}): {e}")
+            failed.append((module_name, str(e)))
+        except Exception as e:
+            print(f"⚠️  {description} ({module_name}): unexpected error during import: {e}")
+            failed.append((module_name, str(e)))
+    
+    print("\n" + "="*50 + "\n")
+    
+    if failed:
+        print(f"❌ {len(failed)} imports failed:")
+        for module, error in failed:
+            print(f"   - {module}: {error}")
+        print("\n💡 Install missing dependencies with: pip install -r requirements.txt")
+        return False
+    else:
+        print("✅ All imports successful!")
+        print("\n🚀 You can now run: python app.py")
+        return True
+
+if __name__ == "__main__":
+    success = test_imports()
+    sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/test_ui.py b/test_ui.py
deleted file mode 100755
index b9f9dba230ad675189c8f554fb9591190181524d..0000000000000000000000000000000000000000
--- a/test_ui.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python3
-"""
-Quick UI Test Script
-Run this to see the new Streamlit UI without the full backend
-"""
-
-import subprocess
-import sys
-import os
-import logging
-
-# Configure logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
-
-def main():
-    """Test the Streamlit UI"""
-    logger.info("🎨 Testing DigiPal Streamlit UI")
-    logger.info("=" * 50)
-    logger.info("This will show you the new UI interface")
-    logger.info("Note: Backend features won't work without running the API")
-    logger.info("=" * 50)
-    
-    # Create necessary directories
-    os.makedirs("data/saves", exist_ok=True)
-    os.makedirs("data/models", exist_ok=True)
-    os.makedirs("data/cache", exist_ok=True)
-    os.makedirs("logs", exist_ok=True)
-    
-    try:
-        port = os.getenv("STREAMLIT_PORT", "8501")
-        logger.info(f"Starting Streamlit UI on port {port}...")
-        logger.info(f"Open your browser to: http://localhost:{port}")
-        
-        subprocess.run([
-            sys.executable, "-m", "streamlit", "run", 
-            "streamlit_app.py",
-            "--server.port", port,
-            "--server.address", "0.0.0.0",
-            "--server.headless", "false"
-        ], check=True)
-        
-    except subprocess.CalledProcessError as e:
-        logger.error(f"Failed to start Streamlit: {e}")
-        logger.info("Make sure you have streamlit installed: pip install streamlit")
-    except KeyboardInterrupt:
-        logger.info("UI test stopped")
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
diff --git a/src/ui/__init__.py b/ui/__init__.py
similarity index 100%
rename from src/ui/__init__.py
rename to ui/__init__.py
diff --git a/ui/interfaces.py b/ui/interfaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..16b4663a6eef13061e47c632f92d6480117de923
--- /dev/null
+++ b/ui/interfaces.py
@@ -0,0 +1,458 @@
+import gradio as gr
+from typing import Any
+
+def create_voice_interface(pipeline: Any, game_mechanics: Any) -> gr.Column:
+    """Create voice-controlled interface for monster generation"""
+    
+    with gr.Column() as voice_interface:
+        gr.Markdown("""
+        ### 🎙️ Voice Control Interface
+        Describe your monster by voice: record with the microphone or upload a clip to create your digital companion!
+        """)
+        
+        with gr.Row():
+            with gr.Column(scale=1):
+                # Voice input
+                voice_input = gr.Audio(
+                    label="🎤 Voice Description",
+                    sources=["microphone", "upload"],
+                    type="filepath",
+                    elem_classes=["cyber-input"]
+                )
+                
+                # Voice control buttons
+                with gr.Row():
+                    start_recording = gr.Button(
+                        "🔴 Start Recording",
+                        elem_classes=["cyber-button"],
+                        size="sm"
+                    )
+                    stop_recording = gr.Button(
+                        "⏹️ Stop Recording",
+                        elem_classes=["cyber-button"],
+                        size="sm"
+                    )
+                
+                # Real-time transcription display
+                transcription_display = gr.Textbox(
+                    label="📝 Transcription",
+                    placeholder="Your voice will be transcribed here...",
+                    interactive=False,
+                    lines=3,
+                    elem_classes=["cyber-output"]
+                )
+                
+                # Voice commands
+                gr.Markdown("""
+                **Voice Commands:**
+                - "Create a [type] monster" - Generate specific type
+                - "Make it [color/trait]" - Add characteristics
+                - "Give it [ability]" - Add special abilities
+                """)
+            
+            with gr.Column(scale=1):
+                # Preview and generation status
+                generation_status = gr.Markdown(
+                    value="🟢 Ready for voice input",
+                    elem_classes=["cyber-message"]
+                )
+                
+                # Audio visualization
+                audio_viz = gr.HTML(
+                    value="""
+                    <div class="audio-visualizer">
+                        <div class="bar"></div>
+                        <div class="bar"></div>
+                        <div class="bar"></div>
+                        <div class="bar"></div>
+                        <div class="bar"></div>
+                    </div>
+                    """,
+                    elem_classes=["cyber-container"]
+                )
+                
+                # Quick voice templates
+                gr.Markdown("**Quick Templates:**")
+                template_buttons = []
+                templates = [
+                    ("🔥 Fire Type", "Create a fierce fire-breathing dragon monster"),
+                    ("💧 Water Type", "Create a graceful aquatic monster"),
+                    ("⚡ Electric Type", "Create a sparking electric monster"),
+                    ("🌿 Nature Type", "Create a peaceful nature guardian monster")
+                ]
+                
+                for label, prompt in templates:
+                    btn = gr.Button(label, size="sm", elem_classes=["cyber-button"])
+                    template_buttons.append((btn, prompt))
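+
+                # Illustrative wiring (an assumption, not part of the original
+                # patch): route each template's canned prompt into the
+                # transcription box when its button is clicked.
+                for btn, prompt in template_buttons:
+                    btn.click(fn=lambda p=prompt: p, outputs=transcription_display)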
+        
+        # Voice interface specific styling
+        gr.HTML("""
+        <style>
+        .audio-visualizer {
+            display: flex;
+            justify-content: center;
+            align-items: center;
+            height: 100px;
+            gap: 5px;
+        }
+        
+        .audio-visualizer .bar {
+            width: 10px;
+            height: 30px;
+            background: linear-gradient(to top, #00ff41, #8A2BE2);
+            animation: audio-wave 1s ease-in-out infinite;
+            border-radius: 5px;
+        }
+        
+        .audio-visualizer .bar:nth-child(1) { animation-delay: 0s; }
+        .audio-visualizer .bar:nth-child(2) { animation-delay: 0.1s; }
+        .audio-visualizer .bar:nth-child(3) { animation-delay: 0.2s; }
+        .audio-visualizer .bar:nth-child(4) { animation-delay: 0.3s; }
+        .audio-visualizer .bar:nth-child(5) { animation-delay: 0.4s; }
+        
+        @keyframes audio-wave {
+            0%, 100% { height: 30px; }
+            50% { height: 60px; }
+        }
+        </style>
+        """)
+    
+    return voice_interface
+
+def create_visual_interface(pipeline: Any, game_mechanics: Any) -> gr.Column:
+    """Create visual/camera-based interface for monster generation"""
+    
+    with gr.Column() as visual_interface:
+        gr.Markdown("""
+        ### 👁️ Visual Control Interface
+        Use images, drawings, or camera input to inspire your monster creation!
+        """)
+        
+        with gr.Row():
+            with gr.Column(scale=1):
+                # Image input options
+                with gr.Tabs():
+                    with gr.TabItem("📷 Camera"):
+                        camera_input = gr.Image(
+                            label="Camera Capture",
+                            sources=["webcam"],
+                            type="pil",
+                            elem_classes=["cyber-input", "camera-feed"]
+                        )
+                    
+                    with gr.TabItem("🖼️ Upload"):
+                        image_upload = gr.File(
+                            label="Upload Reference Images",
+                            file_count="multiple",
+                            file_types=["image"],
+                            elem_classes=["cyber-input"]
+                        )
+                        
+                        uploaded_gallery = gr.Gallery(
+                            label="Uploaded References",
+                            columns=3,
+                            rows=1,
+                            height="150px",
+                            elem_classes=["cyber-container"]
+                        )
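+
+                        # Illustrative wiring (an assumption, not part of the
+                        # original patch): preview uploaded files in the gallery.
+                        image_upload.change(
+                            fn=lambda files: files or [],
+                            inputs=image_upload,
+                            outputs=uploaded_gallery,
+                        )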
+                    
+                    with gr.TabItem("✏️ Draw"):
+                        sketch_pad = gr.Sketchpad(
+                            label="Draw Your Monster",
+                            type="pil",
+                            elem_classes=["cyber-input", "sketch-pad"]
+                        )
+                
+                # Visual style options
+                gr.Markdown("**Visual Style Options:**")
+                
+                style_modifier = gr.CheckboxGroup(
+                    choices=[
+                        "🎨 Artistic",
+                        "🤖 Mechanical", 
+                        "✨ Magical",
+                        "🌊 Organic",
+                        "💎 Crystalline"
+                    ],
+                    label="Style Modifiers",
+                    elem_classes=["cyber-checkbox"]
+                )
+                
+                color_palette = gr.Radio(
+                    choices=[
+                        "🌈 Vibrant",
+                        "🌑 Dark",
+                        "❄️ Cool",
+                        "🔥 Warm",
+                        "🎨 Custom"
+                    ],
+                    label="Color Palette",
+                    value="🌈 Vibrant",
+                    elem_classes=["cyber-radio"]
+                )
+            
+            with gr.Column(scale=1):
+                # Image analysis display
+                image_analysis = gr.Markdown(
+                    value="📊 Image Analysis Results",
+                    elem_classes=["cyber-message"]
+                )
+                
+                # Detected features
+                detected_features = gr.JSON(
+                    label="🔍 Detected Features",
+                    elem_classes=["cyber-stats"]
+                )
+                
+                # Generation preview
+                preview_placeholder = gr.HTML(
+                    value="""
+                    <div class="preview-container">
+                        <div class="scanning-overlay">
+                            <div class="scan-line"></div>
+                        </div>
+                        <p>Preview will appear here...</p>
+                    </div>
+                    """,
+                    elem_classes=["cyber-container"]
+                )
+                
+                # Confidence meter
+                confidence_display = gr.HTML(
+                    value="""
+                    <div class="confidence-meter">
+                        <label>Generation Confidence:</label>
+                        <div class="meter-bar">
+                            <div class="meter-fill" style="width: 0%"></div>
+                        </div>
+                        <span class="meter-value">0%</span>
+                    </div>
+                    """,
+                    elem_classes=["cyber-container"]
+                )
+        
+        # Visual interface specific styling
+        gr.HTML("""
+        <style>
+        .camera-feed {
+            border: 3px solid #00ff41;
+            border-radius: 10px;
+            overflow: hidden;
+            position: relative;
+        }
+        
+        .camera-feed::after {
+            content: 'LIVE';
+            position: absolute;
+            top: 10px;
+            right: 10px;
+            background: red;
+            color: white;
+            padding: 5px 10px;
+            border-radius: 5px;
+            font-size: 12px;
+            animation: blink 1s infinite;
+        }
+        
+        @keyframes blink {
+            0%, 100% { opacity: 1; }
+            50% { opacity: 0.5; }
+        }
+        
+        .sketch-pad {
+            background: rgba(0, 0, 0, 0.9);
+            border: 2px solid #8A2BE2;
+        }
+        
+        .preview-container {
+            position: relative;
+            height: 200px;
+            display: flex;
+            align-items: center;
+            justify-content: center;
+            overflow: hidden;
+        }
+        
+        .scanning-overlay {
+            position: absolute;
+            top: 0;
+            left: 0;
+            right: 0;
+            bottom: 0;
+            pointer-events: none;
+        }
+        
+        .scan-line {
+            position: absolute;
+            top: 0;
+            left: 0;
+            right: 0;
+            height: 2px;
+            background: linear-gradient(90deg, transparent, #00ff41, transparent);
+            animation: scan-vertical 2s linear infinite;
+        }
+        
+        @keyframes scan-vertical {
+            0% { top: 0; }
+            100% { top: 100%; }
+        }
+        
+        .confidence-meter {
+            padding: 15px;
+        }
+        
+        .confidence-meter label {
+            color: #8A2BE2;
+            font-size: 14px;
+            margin-bottom: 5px;
+            display: block;
+        }
+        
+        .meter-bar {
+            background: rgba(0, 0, 0, 0.5);
+            border: 1px solid #00ff41;
+            height: 20px;
+            border-radius: 10px;
+            overflow: hidden;
+            margin: 10px 0;
+        }
+        
+        .meter-fill {
+            height: 100%;
+            background: linear-gradient(90deg, #00ff41, #8A2BE2);
+            transition: width 0.5s ease;
+        }
+        
+        .meter-value {
+            color: #00ff41;
+            font-weight: bold;
+            font-size: 18px;
+        }
+        </style>
+        """)
+    
+    return visual_interface
+
+def create_monster_status_display() -> gr.Column:
+    """Create monster status display component"""
+    
+    with gr.Column() as status_display:
+        # 3D Model viewer
+        model_viewer = gr.Model3D(
+            label="Your Digital Monster",
+            height=400,
+            elem_classes=["monster-display"]
+        )
+        
+        # Status indicators
+        with gr.Row():
+            hp_bar = gr.HTML(
+                value=create_status_bar("HP", 100, 100, "#ff4444"),
+                elem_classes=["status-bar"]
+            )
+            
+            hunger_bar = gr.HTML(
+                value=create_status_bar("Hunger", 80, 100, "#ffaa44"),
+                elem_classes=["status-bar"]
+            )
+            
+            happiness_bar = gr.HTML(
+                value=create_status_bar("Happiness", 90, 100, "#44ff44"),
+                elem_classes=["status-bar"]
+            )
+        
+        # Communication display
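+        # (The default value below appears to encode "robot / love / 90" with
+        # emoji, matching the 90% happiness bar above.)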
+        communication = gr.Textbox(
+            label="Monster Says",
+            value="🤖💚9️⃣0️⃣",
+            interactive=False,
+            elem_classes=["cyber-dialogue"]
+        )
+        
+        # Evolution progress
+        evolution_display = gr.HTML(
+            value="""
+            <div class="evolution-progress">
+                <h4>Evolution Progress</h4>
+                <div class="progress-ring">
+                    <svg width="120" height="120">
+                        <circle cx="60" cy="60" r="50" stroke="#333" stroke-width="10" fill="none"/>
+                        <circle cx="60" cy="60" r="50" stroke="#8A2BE2" stroke-width="10" fill="none"
+                                stroke-dasharray="314" stroke-dashoffset="157"
+                                transform="rotate(-90 60 60)"/>
+                    </svg>
+                    <div class="progress-text">50%</div>
+                </div>
+            </div>
+            """,
+            elem_classes=["evolution-display"]
+        )
+    
+    return status_display
+
+def create_status_bar(label: str, current: int, max_val: int, color: str) -> str:
+    """Create HTML status bar"""
+    percentage = (current / max_val) * 100 if max_val else 0
+    
+    return f"""
+    <div class="status-bar-container">
+        <label>{label}</label>
+        <div class="status-bar-bg">
+            <div class="status-bar-fill" style="width: {percentage}%; background: {color};"></div>
+        </div>
+        <span class="status-value">{current}/{max_val}</span>
+    </div>
+    """
+
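+# Illustrative helper (an assumption, not part of the original patch): rebuild
+# the three stat bars from a monster stats dict so the gr.HTML components can
+# be refreshed after an action.
+def render_stat_bars(stats: dict) -> tuple:
+    return (
+        create_status_bar("HP", stats.get("hp", 100), 100, "#ff4444"),
+        create_status_bar("Hunger", stats.get("hunger", 100), 100, "#ffaa44"),
+        create_status_bar("Happiness", stats.get("happiness", 100), 100, "#44ff44"),
+    )
+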
+def create_training_interface() -> gr.Column:
+    """Create training interface component"""
+    
+    with gr.Column() as training_interface:
+        gr.Markdown("""
+        ### 💪 Training Center
+        Train your monster to improve its stats and prepare for evolution!
+        """)
+        
+        # Training schedule
+        with gr.Row():
+            with gr.Column():
+                training_schedule = gr.DataFrame(
+                    headers=["Time", "Activity", "Stat Focus", "Intensity"],
+                    datatype=["str", "str", "str", "number"],
+                    value=[
+                        ["Morning", "Strength Training", "Attack", 7],
+                        ["Afternoon", "Agility Course", "Speed", 5],
+                        ["Evening", "Meditation", "Special", 3]
+                    ],
+                    elem_classes=["cyber-table"]
+                )
+            
+            with gr.Column():
+                # Training mini-game
+                training_game = gr.HTML(
+                    value="""
+                    <div class="training-game">
+                        <h4>Quick Training</h4>
+                        <div class="game-area">
+                            <div class="target" id="training-target"></div>
+                        </div>
+                        <p>Click the targets to train!</p>
+                    </div>
+                    """,
+                    elem_classes=["cyber-container"]
+                )
+                
+                # Training rewards
+                rewards_display = gr.Markdown(
+                    """
+                    **Today's Rewards:**
+                    - 🏆 +15 Attack
+                    - 🛡️ +10 Defense
+                    - ⚡ +5 Speed
+                    """,
+                    elem_classes=["cyber-message"]
+                )
+    
+    return training_interface
\ No newline at end of file
diff --git a/ui/themes.py b/ui/themes.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeba2227d6f3ace35f502351d4d9254daed7ad62
--- /dev/null
+++ b/ui/themes.py
@@ -0,0 +1,428 @@
+import gradio as gr
+from gradio.themes.utils import colors, fonts
+
+def get_cyberpunk_theme():
+    """Create cyberpunk-themed Gradio theme"""
+    
+    theme = gr.themes.Base(
+        primary_hue=colors.green,
+        secondary_hue=colors.purple,
+        neutral_hue=colors.gray,
+        font=[fonts.GoogleFont("Orbitron"), "monospace", "sans-serif"],
+    ).set(
+        # Background colors
+        body_background_fill="linear-gradient(135deg, #0a0a0a, #1a0033, #001122)",
+        body_background_fill_dark="linear-gradient(135deg, #050505, #0d001a, #000811)",
+        
+        # Text colors
+        body_text_color="#00ff41",
+        body_text_color_dark="#00ff41",
+        body_text_color_subdued="#8A2BE2",
+        body_text_color_subdued_dark="#8A2BE2",
+        
+        # Component colors
+        background_fill_primary="rgba(0, 255, 65, 0.1)",
+        background_fill_primary_dark="rgba(0, 255, 65, 0.05)",
+        background_fill_secondary="rgba(138, 43, 226, 0.1)",
+        background_fill_secondary_dark="rgba(138, 43, 226, 0.05)",
+        
+        # Borders
+        border_color_primary="#00ff41",
+        border_color_primary_dark="#00ff41",
+        border_color_accent="#8A2BE2",
+        border_color_accent_dark="#8A2BE2",
+        
+        # Shadows
+        shadow_drop="0 0 20px rgba(0, 255, 65, 0.5)",
+        shadow_drop_lg="0 0 30px rgba(138, 43, 226, 0.5)",
+        
+        # Buttons
+        button_primary_background_fill="linear-gradient(45deg, #00ff41, #8A2BE2)",
+        button_primary_background_fill_hover="linear-gradient(45deg, #00ff41, #9932CC)",
+        button_primary_background_fill_dark="linear-gradient(45deg, #00cc33, #7B68EE)",
+        button_primary_text_color="#000000",
+        button_primary_text_color_dark="#000000",
+        
+        # Inputs
+        input_background_fill="rgba(0, 0, 0, 0.8)",
+        input_background_fill_dark="rgba(0, 0, 0, 0.9)",
+        input_border_color="#00ff41",
+        input_border_color_dark="#00ff41",
+        input_border_color_focus="#8A2BE2",
+        input_border_color_focus_dark="#8A2BE2",
+        
+        # Other elements
+        block_background_fill="rgba(0, 0, 0, 0.6)",
+        block_background_fill_dark="rgba(0, 0, 0, 0.8)",
+        block_border_color="#00ff41",
+        block_border_color_dark="#00ff41",
+        block_border_width="2px",
+        block_shadow="0 0 15px rgba(0, 255, 65, 0.3)",
+        
+        # Radius
+        radius_lg="8px",
+        radius_md="6px",
+        radius_sm="4px",
+    )
+    
+    return theme
+
+# Custom CSS for additional cyberpunk styling
+CYBERPUNK_CSS = """
+/* Import cyberpunk font */
+@import url('https://fonts.googleapis.com/css2?family=Orbitron:wght@400;700;900&display=swap');
+
+/* Global styles */
+* {
+    font-family: 'Orbitron', monospace !important;
+}
+
+/* Animated background */
+.gradio-container {
+    background: linear-gradient(135deg, #0a0a0a, #1a0033, #001122);
+    background-size: 400% 400%;
+    animation: gradient-shift 15s ease infinite;
+}
+
+@keyframes gradient-shift {
+    0% { background-position: 0% 50%; }
+    50% { background-position: 100% 50%; }
+    100% { background-position: 0% 50%; }
+}
+
+/* Cyber header */
+.cyber-header {
+    background: linear-gradient(135deg, rgba(0,0,0,0.9), rgba(26,0,51,0.9));
+    border: 2px solid #00ff41;
+    border-radius: 10px;
+    padding: 30px;
+    margin-bottom: 30px;
+    position: relative;
+    overflow: hidden;
+    box-shadow: 0 0 30px rgba(0,255,65,0.5), inset 0 0 30px rgba(0,255,65,0.1);
+}
+
+.cyber-header::before {
+    content: '';
+    position: absolute;
+    top: -2px;
+    left: -2px;
+    right: -2px;
+    bottom: -2px;
+    background: linear-gradient(45deg, #00ff41, #8A2BE2, #00ff41);
+    z-index: -1;
+    animation: border-glow 3s linear infinite;
+    filter: blur(5px);
+}
+
+@keyframes border-glow {
+    0%, 100% { opacity: 1; }
+    50% { opacity: 0.5; }
+}
+
+/* Glitch text effect */
+.glitch-text {
+    position: relative;
+    color: #00ff41;
+    font-size: 3em;
+    font-weight: 900;
+    text-transform: uppercase;
+    text-shadow: 
+        0 0 10px #00ff41,
+        0 0 20px #00ff41,
+        0 0 40px #00ff41;
+    animation: text-glow 2s ease-in-out infinite alternate;
+}
+
+@keyframes text-glow {
+    from { text-shadow: 0 0 10px #00ff41, 0 0 20px #00ff41, 0 0 40px #00ff41; }
+    to { text-shadow: 0 0 20px #00ff41, 0 0 30px #00ff41, 0 0 50px #00ff41; }
+}
+
+.glitch-text::before,
+.glitch-text::after {
+    content: attr(data-text);
+    position: absolute;
+    top: 0;
+    left: 0;
+    width: 100%;
+    height: 100%;
+}
+
+.glitch-text::before {
+    animation: glitch-1 0.5s infinite;
+    color: #8A2BE2;
+    z-index: -1;
+}
+
+.glitch-text::after {
+    animation: glitch-2 0.5s infinite;
+    color: #00ff41;
+    z-index: -2;
+}
+
+@keyframes glitch-1 {
+    0% { clip: rect(44px, 450px, 56px, 0); transform: translate(0); }
+    20% { clip: rect(20px, 450px, 30px, 0); transform: translate(-2px, 2px); }
+    40% { clip: rect(85px, 450px, 95px, 0); transform: translate(2px, -2px); }
+    60% { clip: rect(10px, 450px, 20px, 0); transform: translate(-1px, 1px); }
+    80% { clip: rect(60px, 450px, 70px, 0); transform: translate(1px, -1px); }
+    100% { clip: rect(44px, 450px, 56px, 0); transform: translate(0); }
+}
+
+@keyframes glitch-2 {
+    0% { clip: rect(65px, 450px, 75px, 0); transform: translate(0); }
+    20% { clip: rect(30px, 450px, 40px, 0); transform: translate(2px, -2px); }
+    40% { clip: rect(90px, 450px, 100px, 0); transform: translate(-2px, 2px); }
+    60% { clip: rect(15px, 450px, 25px, 0); transform: translate(1px, -1px); }
+    80% { clip: rect(70px, 450px, 80px, 0); transform: translate(-1px, 1px); }
+    100% { clip: rect(65px, 450px, 75px, 0); transform: translate(0); }
+}
+
+/* Cyber subtitle */
+.cyber-subtitle {
+    color: #8A2BE2;
+    font-size: 1.2em;
+    text-transform: uppercase;
+    letter-spacing: 2px;
+    text-shadow: 0 0 10px #8A2BE2;
+}
+
+/* Pulse line */
+.pulse-line {
+    height: 2px;
+    background: linear-gradient(90deg, transparent, #00ff41, transparent);
+    margin: 20px 0;
+    animation: pulse-width 2s ease-in-out infinite;
+}
+
+@keyframes pulse-width {
+    0%, 100% { transform: scaleX(0.5); opacity: 0.5; }
+    50% { transform: scaleX(1); opacity: 1; }
+}
+
+/* Cyber containers */
+.cyber-container {
+    background: rgba(0, 0, 0, 0.8);
+    border: 2px solid #00ff41;
+    border-radius: 8px;
+    padding: 20px;
+    position: relative;
+    box-shadow: 
+        0 0 20px rgba(0, 255, 65, 0.3),
+        inset 0 0 20px rgba(0, 255, 65, 0.1);
+    animation: container-pulse 4s ease-in-out infinite;
+}
+
+@keyframes container-pulse {
+    0%, 100% { 
+        border-color: #00ff41;
+        box-shadow: 0 0 20px rgba(0, 255, 65, 0.3), inset 0 0 20px rgba(0, 255, 65, 0.1);
+    }
+    50% { 
+        border-color: #8A2BE2;
+        box-shadow: 0 0 30px rgba(138, 43, 226, 0.5), inset 0 0 20px rgba(138, 43, 226, 0.1);
+    }
+}
+
+/* Cyber buttons */
+.cyber-button {
+    background: linear-gradient(45deg, #00ff41, #8A2BE2);
+    border: none;
+    color: #000;
+    font-weight: bold;
+    text-transform: uppercase;
+    letter-spacing: 1px;
+    padding: 12px 24px;
+    border-radius: 6px;
+    position: relative;
+    overflow: hidden;
+    transition: all 0.3s ease;
+    box-shadow: 0 0 20px rgba(0, 255, 65, 0.5);
+}
+
+.cyber-button::before {
+    content: '';
+    position: absolute;
+    top: 50%;
+    left: 50%;
+    width: 0;
+    height: 0;
+    background: rgba(255, 255, 255, 0.3);
+    border-radius: 50%;
+    transform: translate(-50%, -50%);
+    transition: width 0.6s, height 0.6s;
+}
+
+.cyber-button:hover::before {
+    width: 300px;
+    height: 300px;
+}
+
+.cyber-button:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 5px 30px rgba(0, 255, 65, 0.7);
+}
+
+/* Generate button special */
+.generate-button {
+    font-size: 1.2em;
+    animation: generate-pulse 2s ease-in-out infinite;
+}
+
+@keyframes generate-pulse {
+    0%, 100% { box-shadow: 0 0 20px rgba(0, 255, 65, 0.5); }
+    50% { box-shadow: 0 0 40px rgba(0, 255, 65, 0.8), 0 0 60px rgba(138, 43, 226, 0.5); }
+}
+
+/* Cyber inputs */
+.cyber-input input,
+.cyber-input textarea {
+    background: rgba(0, 0, 0, 0.9) !important;
+    border: 2px solid #00ff41 !important;
+    color: #00ff41 !important;
+    font-family: 'Orbitron', monospace !important;
+    transition: all 0.3s ease;
+}
+
+.cyber-input input:focus,
+.cyber-input textarea:focus {
+    border-color: #8A2BE2 !important;
+    box-shadow: 0 0 20px rgba(138, 43, 226, 0.5) !important;
+    outline: none !important;
+}
+
+/* Monster display */
+.monster-display {
+    background: rgba(0, 0, 0, 0.9);
+    border: 2px solid #00ff41;
+    border-radius: 10px;
+    padding: 20px;
+    position: relative;
+    overflow: hidden;
+}
+
+.monster-display::before {
+    content: '';
+    position: absolute;
+    top: 0;
+    left: -100%;
+    width: 100%;
+    height: 100%;
+    background: linear-gradient(90deg, transparent, rgba(0, 255, 65, 0.2), transparent);
+    animation: scan-line 3s linear infinite;
+}
+
+@keyframes scan-line {
+    0% { left: -100%; }
+    100% { left: 100%; }
+}
+
+/* Evolution display */
+.evolution-display {
+    background: linear-gradient(135deg, rgba(138, 43, 226, 0.2), rgba(0, 255, 65, 0.2));
+    border: 2px solid #8A2BE2;
+    border-radius: 8px;
+    padding: 15px;
+    margin: 10px 0;
+    animation: evolution-glow 2s ease-in-out infinite;
+}
+
+@keyframes evolution-glow {
+    0%, 100% { box-shadow: 0 0 20px rgba(138, 43, 226, 0.5); }
+    50% { box-shadow: 0 0 40px rgba(138, 43, 226, 0.8); }
+}
+
+/* Cyber dialogue */
+.cyber-dialogue {
+    font-size: 1.5em;
+    text-align: center;
+    color: #00ff41;
+    text-shadow: 0 0 10px currentColor;
+}
+
+/* Cyber stats */
+.cyber-stats {
+    font-family: 'Orbitron', monospace;
+    color: #00ff41;
+}
+
+.cyber-stats .label {
+    color: #8A2BE2;
+}
+
+/* Tab styling */
+.cyber-tabs .tab-nav {
+    background: rgba(0, 0, 0, 0.8);
+    border-bottom: 2px solid #00ff41;
+}
+
+.cyber-tabs .tab-nav button {
+    color: #00ff41 !important;
+    background: transparent !important;
+    border: none !important;
+    transition: all 0.3s ease;
+}
+
+.cyber-tabs .tab-nav button:hover {
+    background: rgba(0, 255, 65, 0.1) !important;
+    text-shadow: 0 0 10px #00ff41;
+}
+
+.cyber-tabs .tab-nav button.selected {
+    background: rgba(0, 255, 65, 0.2) !important;
+    border-bottom: 3px solid #00ff41 !important;
+}
+
+/* Loading animation */
+.loading {
+    display: inline-block;
+    width: 20px;
+    height: 20px;
+    border: 3px solid rgba(0, 255, 65, 0.3);
+    border-radius: 50%;
+    border-top-color: #00ff41;
+    animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+    to { transform: rotate(360deg); }
+}
+
+/* Scrollbar styling */
+::-webkit-scrollbar {
+    width: 10px;
+    height: 10px;
+}
+
+::-webkit-scrollbar-track {
+    background: rgba(0, 0, 0, 0.5);
+    border: 1px solid #00ff41;
+}
+
+::-webkit-scrollbar-thumb {
+    background: linear-gradient(45deg, #00ff41, #8A2BE2);
+    border-radius: 5px;
+}
+
+::-webkit-scrollbar-thumb:hover {
+    background: linear-gradient(45deg, #00ff41, #9932CC);
+}
+
+/* Mobile responsiveness */
+@media (max-width: 768px) {
+    .glitch-text {
+        font-size: 2em;
+    }
+    
+    .cyber-container {
+        padding: 15px;
+    }
+    
+    .cyber-button {
+        padding: 10px 20px;
+        font-size: 0.9em;
+    }
+}
+"""
\ No newline at end of file
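+
+
+# Minimal smoke test (illustrative, an assumption rather than part of the
+# original patch): build an empty Blocks app with the theme and CSS applied.
+if __name__ == "__main__":
+    with gr.Blocks(theme=get_cyberpunk_theme(), css=CYBERPUNK_CSS) as demo:
+        gr.HTML('<div class="cyber-header"><span class="glitch-text" data-text="DIGIPAL">DIGIPAL</span></div>')
+    demo.launch()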
diff --git a/src/utils/__init__.py b/utils/__init__.py
similarity index 100%
rename from src/utils/__init__.py
rename to utils/__init__.py
diff --git a/utils/caching.py b/utils/caching.py
new file mode 100644
index 0000000000000000000000000000000000000000..43e4af276b15ddefe32de6d3760ae8df1949b24f
--- /dev/null
+++ b/utils/caching.py
@@ -0,0 +1,360 @@
+import os
+import json
+import time
+import hashlib
+from pathlib import Path
+from typing import Any, Optional, Dict, Union
+import pickle
+import shutil
+from datetime import datetime, timedelta
+
+class ModelCache:
+    """Manages caching for AI models and generated content"""
+    
+    def __init__(self, cache_dir: Optional[Union[str, Path]] = None):
+        if cache_dir is None:
+            # Use HuggingFace Spaces persistent storage if available
+            if os.path.exists("/data"):
+                cache_dir = "/data/cache"
+            else:
+                cache_dir = Path.home() / ".cache" / "digipal"
+        
+        self.cache_dir = Path(cache_dir)
+        self.cache_dir.mkdir(parents=True, exist_ok=True)
+        
+        # Cache subdirectories
+        self.model_cache_dir = self.cache_dir / "models"
+        self.generation_cache_dir = self.cache_dir / "generations"
+        self.audio_cache_dir = self.cache_dir / "audio"
+        
+        for dir_path in [self.model_cache_dir, self.generation_cache_dir, self.audio_cache_dir]:
+            dir_path.mkdir(exist_ok=True)
+        
+        # Cache settings
+        self.max_cache_size_gb = 10  # Maximum cache size in GB
+        self.cache_expiry_days = 7   # Cache expiry in days
+        self.generation_cache_enabled = True
+        
+        # In-memory cache for fast access
+        self.memory_cache = {}
+        self.cache_stats = self._load_cache_stats()
+    
+    def cache_model_weights(self, model_id: str, model_data: Any) -> bool:
+        """Cache model weights to disk"""
+        try:
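+            # Note: pickle is convenient for arbitrary model objects, but
+            # pickle files are only safe when produced locally; never load
+            # cache entries from untrusted sources.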
+            model_hash = self._get_hash(model_id)
+            cache_path = self.model_cache_dir / f"{model_hash}.pkl"
+            
+            with open(cache_path, 'wb') as f:
+                pickle.dump(model_data, f)
+            
+            # Update cache stats
+            self._update_cache_stats('model', model_id, cache_path.stat().st_size)
+            
+            return True
+        except Exception as e:
+            print(f"Failed to cache model {model_id}: {e}")
+            return False
+    
+    def get_cached_model(self, model_id: str) -> Optional[Any]:
+        """Retrieve cached model weights"""
+        try:
+            model_hash = self._get_hash(model_id)
+            cache_path = self.model_cache_dir / f"{model_hash}.pkl"
+            
+            if cache_path.exists():
+                # Check if cache is still valid
+                if self._is_cache_valid(cache_path):
+                    with open(cache_path, 'rb') as f:
+                        return pickle.load(f)
+            
+            return None
+        except Exception as e:
+            print(f"Failed to load cached model {model_id}: {e}")
+            return None
+    
+    def cache_generation(self, prompt: str, result: Dict[str, Any], 
+                        generation_type: str = "monster") -> str:
+        """Cache generation results"""
+        if not self.generation_cache_enabled:
+            return ""
+        
+        try:
+            # Create unique key for this generation
+            cache_key = self._get_generation_key(prompt, generation_type)
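+            # Shard entries into subdirectories keyed by the first two hex
+            # characters of the hash so no single directory grows unbounded.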
+            cache_dir = self.generation_cache_dir / generation_type / cache_key[:2]
+            cache_dir.mkdir(parents=True, exist_ok=True)
+            
+            cache_file = cache_dir / f"{cache_key}.json"
+            
+            # Prepare cache data
+            cache_data = {
+                'prompt': prompt,
+                'type': generation_type,
+                'timestamp': datetime.now().isoformat(),
+                'result': dict(result)  # shallow copy; keeps the caller's dict unmutated below
+            }
+            
+            # Handle file paths in results
+            if 'image' in result and hasattr(result['image'], 'save'):
+                image_path = cache_dir / f"{cache_key}_image.png"
+                result['image'].save(image_path)
+                cache_data['result']['image'] = str(image_path)
+            
+            if 'model_3d' in result and isinstance(result['model_3d'], str):
+                # Copy 3D model to cache
+                model_ext = Path(result['model_3d']).suffix
+                model_cache_path = cache_dir / f"{cache_key}_model{model_ext}"
+                shutil.copy2(result['model_3d'], model_cache_path)
+                cache_data['result']['model_3d'] = str(model_cache_path)
+            
+            # Save cache data
+            with open(cache_file, 'w') as f:
+                json.dump(cache_data, f, indent=2)
+            
+            # Update stats
+            self._update_cache_stats('generation', cache_key, cache_file.stat().st_size)
+            
+            return cache_key
+            
+        except Exception as e:
+            print(f"Failed to cache generation: {e}")
+            return ""
+    
+    def get_cached_generation(self, prompt: str, generation_type: str = "monster") -> Optional[Dict[str, Any]]:
+        """Retrieve cached generation if available"""
+        if not self.generation_cache_enabled:
+            return None
+        
+        try:
+            cache_key = self._get_generation_key(prompt, generation_type)
+            cache_file = self.generation_cache_dir / generation_type / cache_key[:2] / f"{cache_key}.json"
+            
+            if cache_file.exists() and self._is_cache_valid(cache_file):
+                with open(cache_file, 'r') as f:
+                    cache_data = json.load(f)
+                
+                # Load associated files
+                result = cache_data['result']
+                
+                if 'image' in result and isinstance(result['image'], str):
+                    from PIL import Image
+                    if os.path.exists(result['image']):
+                        result['image'] = Image.open(result['image'])
+                
+                return result
+            
+            return None
+            
+        except Exception as e:
+            print(f"Failed to load cached generation: {e}")
+            return None
+    
+    def cache_audio_transcription(self, audio_path: str, transcription: str) -> bool:
+        """Cache audio transcription results"""
+        try:
+            # Get audio file hash
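+            # Keying by content (MD5 of the bytes) rather than by path means a
+            # renamed or re-uploaded copy of the same clip still hits the cache.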
+            with open(audio_path, 'rb') as f:
+                audio_hash = hashlib.md5(f.read()).hexdigest()
+            
+            cache_file = self.audio_cache_dir / f"{audio_hash}.json"
+            
+            cache_data = {
+                'audio_path': audio_path,
+                'transcription': transcription,
+                'timestamp': datetime.now().isoformat()
+            }
+            
+            with open(cache_file, 'w') as f:
+                json.dump(cache_data, f)
+            
+            return True
+            
+        except Exception as e:
+            print(f"Failed to cache audio transcription: {e}")
+            return False
+    
+    def get_cached_transcription(self, audio_path: str) -> Optional[str]:
+        """Get cached audio transcription"""
+        try:
+            with open(audio_path, 'rb') as f:
+                audio_hash = hashlib.md5(f.read()).hexdigest()
+            
+            cache_file = self.audio_cache_dir / f"{audio_hash}.json"
+            
+            if cache_file.exists() and self._is_cache_valid(cache_file):
+                with open(cache_file, 'r') as f:
+                    cache_data = json.load(f)
+                return cache_data['transcription']
+            
+            return None
+            
+        except Exception as e:
+            print(f"Failed to load cached transcription: {e}")
+            return None
+    
+    def add_to_memory_cache(self, key: str, value: Any, ttl_seconds: int = 300):
+        """Add item to in-memory cache with TTL"""
+        expiry_time = time.time() + ttl_seconds
+        self.memory_cache[key] = {
+            'value': value,
+            'expiry': expiry_time
+        }
+    
+    def get_from_memory_cache(self, key: str) -> Optional[Any]:
+        """Get item from in-memory cache"""
+        if key in self.memory_cache:
+            cache_item = self.memory_cache[key]
+            if time.time() < cache_item['expiry']:
+                return cache_item['value']
+            else:
+                # Remove expired item
+                del self.memory_cache[key]
+        return None
+    
+    def clear_expired_cache(self):
+        """Clear expired cache entries"""
+        current_time = datetime.now()
+        cleared_size = 0
+        
+        # Clear file cache
+        for cache_type in [self.model_cache_dir, self.generation_cache_dir, self.audio_cache_dir]:
+            for file_path in cache_type.rglob('*'):
+                if file_path.is_file():
+                    file_age = current_time - datetime.fromtimestamp(file_path.stat().st_mtime)
+                    if file_age > timedelta(days=self.cache_expiry_days):
+                        file_size = file_path.stat().st_size
+                        file_path.unlink()
+                        cleared_size += file_size
+        
+        # Clear memory cache
+        expired_keys = [
+            key for key, item in self.memory_cache.items()
+            if time.time() > item['expiry']
+        ]
+        for key in expired_keys:
+            del self.memory_cache[key]
+        
+        print(f"Cleared {cleared_size / (1024**2):.2f} MB of expired cache")
+        
+        return cleared_size
+    
+    def get_cache_size(self) -> Dict[str, float]:
+        """Get current cache size in MB"""
+        sizes = {
+            'models': 0,
+            'generations': 0,
+            'audio': 0,
+            'total': 0
+        }
+        
+        # Calculate directory sizes
+        for file_path in self.model_cache_dir.rglob('*'):
+            if file_path.is_file():
+                sizes['models'] += file_path.stat().st_size
+        
+        for file_path in self.generation_cache_dir.rglob('*'):
+            if file_path.is_file():
+                sizes['generations'] += file_path.stat().st_size
+        
+        for file_path in self.audio_cache_dir.rglob('*'):
+            if file_path.is_file():
+                sizes['audio'] += file_path.stat().st_size
+        
+        # Convert to MB
+        for key in sizes:
+            sizes[key] = sizes[key] / (1024 ** 2)
+        
+        sizes['total'] = sizes['models'] + sizes['generations'] + sizes['audio']
+        
+        return sizes
+    
+    def enforce_size_limit(self):
+        """Enforce cache size limit by removing oldest entries"""
+        cache_size = self.get_cache_size()
+        
+        if cache_size['total'] > self.max_cache_size_gb * 1024:  # Convert GB to MB
+            # Get all cache files with timestamps
+            all_files = []
+            
+            for cache_dir in [self.model_cache_dir, self.generation_cache_dir, self.audio_cache_dir]:
+                for file_path in cache_dir.rglob('*'):
+                    if file_path.is_file():
+                        all_files.append({
+                            'path': file_path,
+                            'size': file_path.stat().st_size,
+                            'mtime': file_path.stat().st_mtime
+                        })
+            
+            # Sort by modification time (oldest first)
+            all_files.sort(key=lambda x: x['mtime'])
+            
+            # Remove files until under limit
+            current_size = cache_size['total'] * (1024 ** 2)  # Convert to bytes
+            target_size = self.max_cache_size_gb * (1024 ** 3) * 0.8  # 80% of limit
+            
+            for file_info in all_files:
+                if current_size <= target_size:
+                    break
+                
+                file_info['path'].unlink()
+                current_size -= file_info['size']
+                print(f"Removed {file_info['path'].name} to enforce cache limit")
+    
+    def _get_hash(self, text: str) -> str:
+        """Get MD5 hash of text"""
+        return hashlib.md5(text.encode()).hexdigest()
+    
+    def _get_generation_key(self, prompt: str, generation_type: str) -> str:
+        """Get unique key for generation cache"""
+        combined = f"{generation_type}:{prompt}"
+        return self._get_hash(combined)
+    
+    def _is_cache_valid(self, cache_path: Path) -> bool:
+        """Check if cache file is still valid"""
+        if not cache_path.exists():
+            return False
+        
+        file_age = datetime.now() - datetime.fromtimestamp(cache_path.stat().st_mtime)
+        return file_age < timedelta(days=self.cache_expiry_days)
+    
+    def _load_cache_stats(self) -> Dict[str, Any]:
+        """Load cache statistics"""
+        stats_file = self.cache_dir / "cache_stats.json"
+        
+        if stats_file.exists():
+            with open(stats_file, 'r') as f:
+                return json.load(f)
+        
+        return {
+            'total_hits': 0,
+            'total_misses': 0,
+            'last_cleanup': datetime.now().isoformat(),
+            'entries': {}
+        }
+    
+    def _update_cache_stats(self, cache_type: str, key: str, size: int):
+        """Update cache statistics"""
+        self.cache_stats['entries'][key] = {
+            'type': cache_type,
+            'size': size,
+            'timestamp': datetime.now().isoformat()
+        }
+        
+        # Save stats
+        stats_file = self.cache_dir / "cache_stats.json"
+        with open(stats_file, 'w') as f:
+            json.dump(self.cache_stats, f, indent=2)
+    
+    def get_cache_info(self) -> Dict[str, Any]:
+        """Get cache information and statistics"""
+        sizes = self.get_cache_size()
+        
+        return {
+            'sizes': sizes,
+            'stats': self.cache_stats,
+            'memory_cache_items': len(self.memory_cache),
+            'cache_dir': str(self.cache_dir),
+            'max_size_gb': self.max_cache_size_gb,
+            'expiry_days': self.cache_expiry_days
+        }
\ No newline at end of file
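+
+
+if __name__ == "__main__":
+    # Quick self-check (illustrative, an assumption rather than part of the
+    # original patch): exercise the in-memory TTL cache and print cache info.
+    cache = ModelCache()
+    cache.add_to_memory_cache("greeting", "hello", ttl_seconds=5)
+    assert cache.get_from_memory_cache("greeting") == "hello"
+    print(json.dumps(cache.get_cache_info(), indent=2, default=str))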
diff --git a/utils/fallbacks.py b/utils/fallbacks.py
new file mode 100644
index 0000000000000000000000000000000000000000..079cb10ebfdbee35147fee209e33aea1e29d5c19
--- /dev/null
+++ b/utils/fallbacks.py
@@ -0,0 +1,348 @@
+import random
+import numpy as np
+from PIL import Image, ImageDraw, ImageFont
+from typing import Dict, Any, List, Tuple, Optional
+import json
+from datetime import datetime
+import trimesh
+
+class FallbackManager:
+    """Manages fallback strategies when AI models fail"""
+    
+    def __init__(self):
+        # Predefined fallback templates
+        self.monster_templates = {
+            'fire': {
+                'names': ['Pyromon', 'Blazefang', 'Emberclaw', 'Infernox'],
+                'colors': ['#ff4444', '#ff6600', '#ffaa00'],
+                'traits': ['aggressive', 'brave', 'fierce'],
+                'abilities': ['Flame Burst', 'Heat Wave', 'Fire Shield']
+            },
+            'water': {
+                'names': ['Aquamon', 'Tidalfin', 'Wavecrest', 'Hydropex'],
+                'colors': ['#4444ff', '#00aaff', '#00ffff'],
+                'traits': ['calm', 'wise', 'gentle'],
+                'abilities': ['Water Jet', 'Bubble Shield', 'Healing Wave']
+            },
+            'earth': {
+                'names': ['Terramon', 'Boulderback', 'Stoneguard', 'Geomancer'],
+                'colors': ['#885533', '#aa7744', '#665544'],
+                'traits': ['sturdy', 'patient', 'protective'],
+                'abilities': ['Rock Throw', 'Earth Shield', 'Quake']
+            },
+            'electric': {
+                'names': ['Voltmon', 'Sparkfang', 'Thunderclaw', 'Electrix'],
+                'colors': ['#ffff00', '#ffcc00', '#ffffaa'],
+                'traits': ['energetic', 'quick', 'playful'],
+                'abilities': ['Thunder Shock', 'Static Field', 'Lightning Speed']
+            },
+            'nature': {
+                'names': ['Floramon', 'Leafguard', 'Vineclaw', 'Botanix'],
+                'colors': ['#44ff44', '#00aa00', '#88ff88'],
+                'traits': ['peaceful', 'nurturing', 'wise'],
+                'abilities': ['Vine Whip', 'Healing Bloom', 'Nature Shield']
+            },
+            'neutral': {
+                'names': ['Digipet', 'Cybermon', 'Neobit', 'Alphacode'],
+                'colors': ['#888888', '#aaaaaa', '#cccccc'],
+                'traits': ['balanced', 'adaptable', 'loyal'],
+                'abilities': ['Tackle', 'Defense Boost', 'Quick Attack']
+            }
+        }
+        
+        # Emoji dialogue patterns
+        self.dialogue_patterns = {
+            'happy': ['😊', '😄', '🎉', '💖', '✨'],
+            'hungry': ['🍖', '🍗', '🥘', '😋', '🤤'],
+            'tired': ['😴', '💤', '🥱', '😪', '🛌'],
+            'excited': ['🤩', '🎊', '🔥', '⚡', '🌟'],
+            'sad': ['😢', '😔', '💔', '😞', '☔'],
+            'angry': ['😤', '💢', '😠', '🔥', '⚔️']
+        }
+    
+    def handle_stt_failure(self, text_input: Optional[str]) -> str:
+        """Fallback for speech-to-text failure"""
+        if text_input:
+            return text_input
+        
+        # Generate random description
+        templates = [
+            "Create a friendly digital monster companion",
+            "Design a unique creature with special powers",
+            "Make a loyal monster friend",
+            "Generate a mysterious digital being",
+            "Create an evolved cyber creature"
+        ]
+        
+        return random.choice(templates)
+    
+    def handle_text_gen_failure(self, description: str) -> Tuple[Dict[str, Any], str]:
+        """Fallback for text generation failure"""
+        
+        # Analyze description for keywords
+        element = self._detect_element(description)
+        template = self.monster_templates[element]
+        
+        # Generate traits
+        traits = {
+            'name': random.choice(template['names']) + str(random.randint(1, 99)),
+            'species': f"{element.capitalize()} Type Monster",
+            'element': element,
+            'personality': random.choice(template['traits']),
+            'color_scheme': f"Primary: {template['colors'][0]}, Secondary: {template['colors'][1]}",
+            'abilities': random.sample(template['abilities'], 2),
+            'description': description
+        }
+        
+        # Generate dialogue
+        mood = 'happy' if 'friendly' in description.lower() else 'excited'
+        dialogue = self._generate_emoji_dialogue(mood)
+        
+        return traits, dialogue
+    
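+    # Example (illustrative): handle_text_gen_failure("a friendly water spirit")
+    # might return ({'name': 'Tidalfin42', 'element': 'water', ...}, '😊💖8️⃣5️⃣7️⃣2️⃣'),
+    # i.e. a traits dict plus an emoji dialogue string.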
+    def handle_image_gen_failure(self, description: str) -> Image.Image:
+        """Fallback for image generation failure"""
+        
+        # Create procedural monster image
+        width, height = 512, 512
+        image = Image.new('RGBA', (width, height), (0, 0, 0, 0))
+        draw = ImageDraw.Draw(image)
+        
+        # Detect element for color scheme
+        element = self._detect_element(description)
+        colors = self.monster_templates[element]['colors']
+        primary_color = colors[0]
+        secondary_color = colors[1] if len(colors) > 1 else colors[0]
+        
+        # Draw monster shape
+        self._draw_procedural_monster(draw, width, height, primary_color, secondary_color)
+        
+        return image
+    
+    def handle_3d_gen_failure(self, image: Optional[Image.Image]) -> trimesh.Trimesh:
+        """Fallback for 3D generation failure (the source image, if any, is unused)"""
+        
+        # Create simple 3D primitive
+        shapes = [
+            trimesh.creation.icosphere(subdivisions=2, radius=1.0),
+            trimesh.creation.box(extents=[1.5, 1.0, 1.0]),
+            trimesh.creation.cylinder(radius=0.8, height=1.5),
+            trimesh.creation.cone(radius=0.8, height=1.5)
+        ]
+        
+        base_shape = random.choice(shapes)
+        
+        # Add some deformation for variety
+        noise = np.random.normal(0, 0.05, base_shape.vertices.shape)
+        base_shape.vertices += noise
+        
+        # smoothed() returns a copy with merged vertices and smooth shading
+        # normals; it softens the faceted look rather than deforming geometry
+        base_shape = base_shape.smoothed()
+        
+        return base_shape
+    
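+    # Example (illustrative): the returned mesh exports directly, e.g.
+    #   FallbackManager().handle_3d_gen_failure(None).export('fallback.glb')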
+    def handle_rigging_failure(self, mesh: trimesh.Trimesh) -> trimesh.Trimesh:
+        """Fallback for rigging failure - return unrigged mesh"""
+        return mesh
+    
+    def complete_fallback_generation(self, description: str, generation_log: Dict) -> Dict[str, Any]:
+        """Complete fallback generation when entire pipeline fails"""
+        
+        # Generate all components using fallbacks
+        traits, dialogue = self.handle_text_gen_failure(description)
+        image = self.handle_image_gen_failure(description)
+        model_3d = self.handle_3d_gen_failure(image)
+        
+        # Save fallback results to the platform-appropriate temp directory
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        tmp_dir = tempfile.gettempdir()
+        image_path = os.path.join(tmp_dir, f"fallback_monster_{timestamp}.png")
+        model_path = os.path.join(tmp_dir, f"fallback_monster_{timestamp}.glb")
+        
+        image.save(image_path)
+        model_3d.export(model_path)
+        
+        return {
+            'description': description,
+            'traits': traits,
+            'dialogue': dialogue,
+            'image': image,
+            'model_3d': model_path,
+            'download_files': [image_path, model_path],
+            'generation_log': generation_log,
+            'status': 'fallback',
+            'message': '⚡ Generated using quick fallback mode'
+        }
+    
+    def _detect_element(self, description: str) -> str:
+        """Detect element type from description"""
+        description_lower = description.lower()
+        
+        element_keywords = {
+            'fire': ['fire', 'flame', 'burn', 'hot', 'lava', 'ember', 'blaze'],
+            'water': ['water', 'aqua', 'ocean', 'sea', 'wave', 'liquid', 'swim'],
+            'earth': ['earth', 'rock', 'stone', 'ground', 'mountain', 'dirt', 'soil'],
+            'electric': ['electric', 'thunder', 'lightning', 'spark', 'volt', 'shock'],
+            'nature': ['nature', 'plant', 'tree', 'leaf', 'flower', 'grass', 'forest']
+        }
+        
+        for element, keywords in element_keywords.items():
+            if any(keyword in description_lower for keyword in keywords):
+                return element
+        
+        return 'neutral'
+    
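+    # Behavior note (illustrative): matching is substring-based and the first
+    # element whose keyword appears wins, in the order listed above, so
+    # _detect_element("a burning lava beast") returns 'fire', while a
+    # description with no keyword at all falls back to 'neutral'.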
+    def _generate_emoji_dialogue(self, mood: str) -> str:
+        """Generate emoji-based dialogue"""
+        emojis = self.dialogue_patterns.get(mood, self.dialogue_patterns['happy'])
+        
+        # Select 2-3 emojis
+        count = min(random.randint(2, 3), len(emojis))
+        selected_emojis = random.sample(emojis, count)
+        
+        # Add status numbers; the keycap combiner (U+FE0F U+20E3) only binds
+        # to a single preceding digit, so apply it digit by digit
+        hp = random.randint(70, 100)
+        happiness = random.randint(60, 95)
+        
+        dialogue = ''.join(selected_emojis)
+        for value in (hp, happiness):
+            dialogue += ''.join(f"{digit}\ufe0f\u20e3" for digit in str(value))
+        
+        return dialogue
+    
+    def _draw_procedural_monster(self, draw: ImageDraw.ImageDraw, width: int, height: int,
+                                 primary_color: str, secondary_color: str):
+        """Draw a procedural monster shape"""
+        
+        center_x, center_y = width // 2, height // 2
+        
+        # Body (main shape)
+        body_type = random.choice(['circle', 'oval', 'polygon'])
+        
+        if body_type == 'circle':
+            radius = random.randint(80, 120)
+            draw.ellipse(
+                [center_x - radius, center_y - radius, 
+                 center_x + radius, center_y + radius],
+                fill=primary_color,
+                outline=secondary_color,
+                width=3
+            )
+        elif body_type == 'oval':
+            width_r = random.randint(80, 120)
+            height_r = random.randint(100, 140)
+            draw.ellipse(
+                [center_x - width_r, center_y - height_r,
+                 center_x + width_r, center_y + height_r],
+                fill=primary_color,
+                outline=secondary_color,
+                width=3
+            )
+        else:  # polygon
+            num_points = random.randint(5, 8)
+            points = []
+            for i in range(num_points):
+                angle = (2 * np.pi * i) / num_points
+                r = random.randint(80, 120)
+                x = center_x + int(r * np.cos(angle))
+                y = center_y + int(r * np.sin(angle))
+                points.append((x, y))
+            # NOTE: the width= argument to polygon() requires a relatively recent Pillow
+            draw.polygon(points, fill=primary_color, outline=secondary_color, width=3)
+        
+        # Eyes
+        eye_y = center_y - 30
+        eye_spacing = 40
+        eye_radius = 15
+        
+        # Left eye
+        draw.ellipse(
+            [center_x - eye_spacing - eye_radius, eye_y - eye_radius,
+             center_x - eye_spacing + eye_radius, eye_y + eye_radius],
+            fill='white',
+            outline='black',
+            width=2
+        )
+        # Pupil
+        draw.ellipse(
+            [center_x - eye_spacing - 5, eye_y - 5,
+             center_x - eye_spacing + 5, eye_y + 5],
+            fill='black'
+        )
+        
+        # Right eye
+        draw.ellipse(
+            [center_x + eye_spacing - eye_radius, eye_y - eye_radius,
+             center_x + eye_spacing + eye_radius, eye_y + eye_radius],
+            fill='white',
+            outline='black',
+            width=2
+        )
+        # Pupil
+        draw.ellipse(
+            [center_x + eye_spacing - 5, eye_y - 5,
+             center_x + eye_spacing + 5, eye_y + 5],
+            fill='black'
+        )
+        
+        # Add some features
+        features = random.randint(1, 3)
+        
+        if features >= 1:  # Add spikes or horns
+            for i in range(3):
+                spike_x = center_x + (i - 1) * 40
+                spike_y = center_y - 100
+                draw.polygon(
+                    [(spike_x - 10, spike_y + 20),
+                     (spike_x, spike_y),
+                     (spike_x + 10, spike_y + 20)],
+                    fill=secondary_color,
+                    outline='black',
+                    width=1
+                )
+        
+        if features >= 2:  # Add arms
+            # Left arm
+            draw.ellipse(
+                [center_x - 100, center_y - 20,
+                 center_x - 60, center_y + 20],
+                fill=primary_color,
+                outline=secondary_color,
+                width=2
+            )
+            # Right arm
+            draw.ellipse(
+                [center_x + 60, center_y - 20,
+                 center_x + 100, center_y + 20],
+                fill=primary_color,
+                outline=secondary_color,
+                width=2
+            )
+        
+        if features >= 3:  # Add pattern
+            pattern_type = random.choice(['spots', 'stripes'])
+            if pattern_type == 'spots':
+                for _ in range(5):
+                    spot_x = center_x + random.randint(-60, 60)
+                    spot_y = center_y + random.randint(-40, 40)
+                    draw.ellipse(
+                        [spot_x - 10, spot_y - 10,
+                         spot_x + 10, spot_y + 10],
+                        fill=secondary_color
+                    )
+            else:
+                # Stripes: three horizontal bands across the body
+                for i in range(3):
+                    stripe_y = center_y - 30 + i * 30
+                    draw.line(
+                        [(center_x - 50, stripe_y), (center_x + 50, stripe_y)],
+                        fill=secondary_color,
+                        width=6
+                    )
+    
+    def get_fallback_stats(self, element: str) -> Dict[str, int]:
+        """Get fallback stats based on element"""
+        base_stats = {
+            'fire': {'hp': 80, 'attack': 90, 'defense': 60, 'speed': 70, 'special': 85},
+            'water': {'hp': 90, 'attack': 70, 'defense': 80, 'speed': 65, 'special': 80},
+            'earth': {'hp': 100, 'attack': 75, 'defense': 95, 'speed': 50, 'special': 65},
+            'electric': {'hp': 70, 'attack': 80, 'defense': 60, 'speed': 95, 'special': 90},
+            'nature': {'hp': 85, 'attack': 65, 'defense': 75, 'speed': 70, 'special': 90},
+            'neutral': {'hp': 80, 'attack': 75, 'defense': 75, 'speed': 75, 'special': 75}
+        }
+        
+        stats = base_stats.get(element, base_stats['neutral']).copy()
+        
+        # Add some variation
+        for stat in stats:
+            stats[stat] += random.randint(-10, 10)
+            stats[stat] = max(10, min(150, stats[stat]))  # Clamp values
+        
+        return stats
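+
+
+if __name__ == "__main__":
+    # Smoke test (illustrative only): exercise the full fallback pipeline
+    # without any AI models. The description and the empty generation log
+    # are placeholder inputs, not values the wider app is known to pass.
+    manager = FallbackManager()
+    result = manager.complete_fallback_generation("a friendly fire dragon", {})
+    print(result['traits']['name'], '-', result['dialogue'])
+    print(result['message'])
+    print('Stats:', manager.get_fallback_stats(result['traits']['element']))
+    print('Saved:', ', '.join(result['download_files']))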
\ No newline at end of file