BladeSzaSza committed fe24641 · 1 parent: 19b4b73

new design

Note: this view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.

Files changed (50)
  1. .claude/settings.local.json +2 -20
  2. .gitattributes +0 -35
  3. .gitignore +67 -26
  4. CLAUDE.md +0 -235
  5. DEPLOYMENT.md +0 -213
  6. Dockerfile +0 -38
  7. PROJECT_ARCHITECTURE.md +0 -367
  8. QUICK_UI_TEST.md +0 -63
  9. README.md +158 -88
  10. app.py +388 -437
  11. {src/core → core}/__init__.py +0 -0
  12. core/ai_pipeline.py +309 -0
  13. core/auth_manager.py +101 -0
  14. core/game_mechanics.py +496 -0
  15. core/state_manager.py +280 -0
  16. docs/HUNYUAN3D_INTEGRATION.md +0 -264
  17. frontend/app.html +0 -12
  18. frontend/frontend/.gitignore +0 -23
  19. frontend/frontend/.npmrc +0 -1
  20. frontend/frontend/README.md +0 -38
  21. frontend/frontend/package.json +0 -23
  22. frontend/frontend/src/app.d.ts +0 -13
  23. frontend/frontend/src/app.html +0 -12
  24. frontend/frontend/src/routes/+page.svelte +0 -2
  25. frontend/frontend/static/favicon.png +0 -0
  26. frontend/frontend/svelte.config.js +0 -18
  27. frontend/frontend/tsconfig.json +0 -19
  28. frontend/frontend/vite.config.ts +0 -6
  29. frontend/package.json +0 -31
  30. frontend/postcss.config.js +0 -6
  31. frontend/src/app.css +0 -153
  32. frontend/src/routes/+layout.svelte +0 -5
  33. frontend/src/routes/+page.svelte +0 -14
  34. frontend/svelte.config.js +0 -14
  35. frontend/tailwind.config.js +0 -58
  36. frontend/tsconfig.json +0 -13
  37. frontend/vite.config.ts +0 -18
  38. game/__init__.py +1 -0
  39. models/__init__.py +1 -0
  40. models/image_generator.py +253 -0
  41. models/model_3d_generator.py +283 -0
  42. models/rigging_processor.py +546 -0
  43. models/stt_processor.py +154 -0
  44. models/text_generator.py +299 -0
  45. requirements.txt +40 -49
  46. run_digipal.py +0 -80
  47. src/ai/__init__.py +0 -1
  48. src/ai/qwen_processor.py +0 -621
  49. src/ai/speech_engine.py +0 -470
  50. src/core/evolution_system.py +0 -655
.claude/settings.local.json CHANGED
```diff
@@ -1,29 +1,11 @@
 {
   "permissions": {
     "allow": [
-      "Bash(mkdir:*)",
-      "Bash(python:*)",
-      "Bash(rg:*)",
       "WebFetch(domain:huggingface.co)",
-      "WebFetch(domain:huggingface.co)",
-      "WebFetch(domain:huggingface.co)",
-      "Bash(rm:*)",
       "Bash(ls:*)",
+      "Bash(tree:*)",
       "Bash(find:*)",
-      "Bash(npm create:*)",
-      "Bash(npx sv@latest create:*)",
-      "Bash(git pull:*)",
-      "Bash(git add:*)",
-      "Bash(git commit:*)",
-      "Bash(git push:*)",
-      "Bash(grep:*)",
-      "Bash(true)",
-      "Bash(awk:*)",
-      "Bash(git reset:*)",
-      "WebFetch(domain:github.com)",
-      "Bash(timeout:*)",
-      "Bash(git rm:*)",
-      "Bash(chmod:*)"
+      "Bash(mkdir:*)"
     ],
     "deny": []
   }
```
.gitattributes DELETED
```diff
@@ -1,35 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
```
.gitignore CHANGED
```diff
@@ -16,9 +16,11 @@ parts/
 sdist/
 var/
 wheels/
+share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
+MANIFEST
 
 # Virtual Environment
 venv/
@@ -27,47 +29,86 @@ env/
 .venv
 
 # IDE
-.idea/
 .vscode/
+.idea/
 *.swp
 *.swo
 *~
 .DS_Store
 
-# Logs
-logs/
-*.log
-
-# Data directories
-data/cache/
-data/models/
-data/saves/*.db
-
-# Frontend
-frontend/node_modules/
-frontend/.svelte-kit/
-frontend/build/
-frontend/.env
-frontend/.env.*
-
-# Local settings
-.claude/settings.local.json
-
-# Temporary files
-*.tmp
-*.temp
+# Data and Cache
+data/
+cache/
+*.cache
 .cache/
+tmp/
+temp/
 
 # Model files
 *.bin
 *.pth
 *.pt
-*.gguf
-*.safetensors
 *.onnx
+*.safetensors
+models_cache/
+
+# Generated files
+*.log
+*.png
+*.jpg
+*.jpeg
+*.gif
+*.mp3
+*.wav
+*.glb
+*.obj
+*.fbx
+*.dae
+
+# Secrets
+.env
+.env.local
+secrets.json
 
 # HuggingFace
 .huggingface/
 
+# Gradio
+flagged/
+gradio_cached_examples/
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+.tox/
+.nox/
+coverage.xml
+*.cover
+.hypothesis/
+
+# Documentation
+docs/_build/
+site/
+
 # OS files
-Thumbs.db
+Thumbs.db
+ehthumbs.db
+Desktop.ini
+.Spotlight-V100
+.Trashes
+
+# Backup files
+*.bak
+*.backup
+*~
+
+# Custom
+/data/users/*
+/data/monsters/*
+/data/models/*
+/data/cache/*
+!/data/users/.gitkeep
+!/data/monsters/.gitkeep
+!/data/models/.gitkeep
+!/data/cache/.gitkeep
```
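Worth noting in the new `# Custom` block: `/data/users/*` ignores a directory's contents, while the matching `!/data/users/.gitkeep` negation re-includes a single placeholder file. That is the standard trick for keeping an otherwise-empty directory tracked in git.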
CLAUDE.md DELETED
````diff
@@ -1,235 +0,0 @@
-# CLAUDE.md
-
-This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
-
-## Project Overview
-
-DigiPal is an advanced AI-powered virtual monster companion application built with Streamlit, featuring deep AI conversations using Qwen 2.5 models, Kyutai STT speech recognition, comprehensive monster care systems, sophisticated evolution mechanics, and cutting-edge 3D model generation via the OmniGen2 → Hunyuan3D-2.1 → UniRig pipeline. This is a streamlined multi-component system designed for modern deployment with HuggingFace integration.
-
-## Architecture
-
-### Core Technologies
-- **Frontend**: Streamlit with modern cyberpunk UI design
-- **Backend**: FastAPI with WebSocket support for real-time updates
-- **AI Models**: Qwen 2.5-1.5B-Instruct for conversations, Kyutai STT-2.6b-en for speech
-- **3D Pipeline**: OmniGen2 → Hunyuan3D-2.1 → UniRig (text-to-image-to-3D-to-rigged)
-- **Framework**: Python 3.11+ with asyncio for concurrent operations
-- **Database**: SQLite for monster persistence with async operations
-- **Deployment**: Modern architecture with HuggingFace integration, Docker support
-
-### Component Structure
-```
-src/
-├── ai/                      # AI processing components
-│   ├── qwen_processor.py    # Qwen 2.5 conversation engine
-│   └── speech_engine.py     # Kyutai STT speech recognition
-├── core/                    # Core game logic
-│   ├── monster_engine.py    # Monster stats, evolution, persistence
-│   ├── monster_engine_dw1.py # DW1-aligned monster mechanics (reference)
-│   └── evolution_system.py  # Evolution mechanics
-├── pipelines/               # 3D generation pipelines
-│   └── opensource_3d_pipeline_v2.py # Production 3D pipeline: OmniGen2→Hunyuan3D→UniRig
-├── ui/                      # User interface
-│   ├── streamlit_interface.py # Modern Streamlit interface
-│   └── state_manager.py     # Browser state management
-├── deployment/              # Deployment optimization
-│   └── zero_gpu_optimizer.py # Zero GPU resource management
-└── utils/                   # Utilities
-    └── performance_tracker.py # Performance monitoring
-```
-
-## Development Commands
-
-### Running the Application
-```bash
-# Run complete application (FastAPI + Streamlit)
-python run_digipal.py
-
-# Or run components separately:
-
-# Run FastAPI backend server
-python app.py
-
-# Run Streamlit frontend (in another terminal)
-streamlit run src/ui/streamlit_interface.py
-
-# Run with debug logging
-LOG_LEVEL=DEBUG python app.py
-
-# Run with specific configuration
-API_PORT=8081 python app.py
-
-# Run with MCP enabled
-MCP_ENDPOINT=https://your-mcp-server MCP_API_KEY=your-key python app.py
-```
-
-### Running the Svelte Frontend
-```bash
-# Navigate to frontend directory
-cd frontend
-
-# Install dependencies (first time only)
-npm install
-
-# Run development server
-npm run dev
-
-# Build for production
-npm run build
-
-# Preview production build
-npm run preview
-```
-
-### Docker Development
-```bash
-# Build Docker image
-docker build -t digipal .
-
-# Run Docker container
-docker run -p 7860:7860 digipal
-
-# Run with volume mounting for data persistence
-docker run -p 7860:7860 -v $(pwd)/data:/app/data digipal
-```
-
-### Development Tools
-```bash
-# Code formatting (requires black installation)
-black src/
-
-# Linting (requires ruff installation)
-ruff src/
-
-# Testing (test suite not yet implemented)
-pytest
-```
-
-## Key Implementation Details
-
-### Monster System
-- **Six-dimensional care system**: health, happiness, hunger, energy, discipline, cleanliness
-- **Real-time stat degradation**: continues even when the application is offline
-- **Evolution stages**: egg → baby → child → adult → champion → ultimate
-- **Complex evolution requirements**: age, level, care quality, training, battles, social interaction
-- **Personality types**: friendly, energetic, calm, curious, brave with stat modifiers
-- **DW1 alignment**: Optional mode following Digimon World 1 mechanics
-
-### AI Conversation System
-- **Qwen 2.5 integration** with quantization support (8-bit) for GPU efficiency
-- **Kyutai STT-2.6b-en** for high-quality speech-to-text conversion
-- **Context-aware conversations** with personality-based system prompts
-- **Mood-responsive dialogue** based on current monster stats
-- **Conversation history management** with automatic truncation
-- **Flash Attention 2** optimization when available
-
-### 3D Generation Pipeline
-- **OmniGen2**: Advanced text-to-image generation with multi-view consistency
-- **Hunyuan3D-2.1**: State-of-the-art image-to-3D conversion via official HuggingFace Space API
-- **UniRig**: Automatic 3D model rigging via HuggingFace integration
-- **Complete Pipeline**: text → multi-view images → 3D mesh → rigged model
-- **Fallback Systems**: Graceful degradation when APIs are unavailable
-- **Model caching**: Efficient reuse of generated 3D assets
-- **Async generation**: Non-blocking 3D model creation
-
-### State Management
-- **Async SQLite operations** for monster persistence
-- **Browser state management** for session continuity
-- **Time-based stat updates** calculated from last interaction
-- **Cross-session persistence** maintaining monster state between visits
-
-### Zero GPU Optimization
-- **ZeroGPU decorator usage** with proper function-level application
-- **Resource detection** and optimization for Spaces deployment
-- **Memory management** with CUDA memory tracking
-- **Model quantization** for efficient GPU usage
-- **GPU wrapper functions** for AI model initialization and inference
-
-## Database Schema
-
-Monsters are stored in SQLite with JSON serialization:
-```sql
-CREATE TABLE monsters (
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    name TEXT NOT NULL UNIQUE,
-    data TEXT NOT NULL, -- JSON serialized Monster object
-    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
-)
-```
-
-## Environment Variables
-
-### Core Configuration
-- `LOG_LEVEL`: Logging level (DEBUG, INFO, WARNING, ERROR)
-- `SERVER_NAME`: Server hostname (default: 0.0.0.0)
-- `SERVER_PORT`: Server port (default: 7860)
-- `SHARE`: Enable public sharing (true/false)
-- `DEBUG`: Enable debug mode (true/false)
-- `MAX_THREADS`: Maximum Gradio threads (default: 40)
-
-### Feature Toggles (V2)
-- `ENABLE_3D`: Enable 3D generation features (default: true)
-- `ENABLE_AI`: Enable AI conversation features (default: true)
-
-### GPU Configuration
-- `CUDA_VISIBLE_DEVICES`: GPU device selection
-- `TRANSFORMERS_CACHE`: Model cache directory
-- `HF_HOME`: HuggingFace cache directory
-
-### MCP Configuration
-- `MCP_ENDPOINT`: MCP service endpoint URL
-- `MCP_API_KEY`: MCP service authentication key
-
-## Hugging Face Space Configuration
-
-Configured as a Gradio Space with:
-- **SDK**: Gradio 5.34.2
-- **Hardware**: zero-gpu (ZeroGPU for efficient AI inference)
-- **Models**: Qwen/Qwen2.5-1.5B-Instruct, openai/whisper-base
-- **Storage**: medium (for model caching and monster data)
-- **Zero GPU**: @spaces.GPU decorators applied to AI-intensive functions
-
-## ZeroGPU Implementation
-
-The application uses proper ZeroGPU decorator patterns:
-
-```python
-# GPU wrapper functions for AI operations
-@spaces.GPU(duration=120)
-def gpu_generate_response(processor, prompt: str, generation_params: Dict[str, Any]) -> str:
-    # GPU-intensive AI inference
-
-@spaces.GPU(duration=60)
-def gpu_model_initialization(model_class, model_name: str, **kwargs) -> Any:
-    # GPU-intensive model loading
-```
-
-Key ZeroGPU considerations:
-- Decorators applied at function level, not class level
-- Duration specified based on expected GPU usage time
-- Fallback to CPU operations when GPU not available
-- Wrapper functions handle Spaces environment detection
-
-## MCP (Model Context Protocol) Integration
-
-DigiPal supports MCP for flexible model deployment:
-- **ModelProvider enum**: Includes MCP alongside HUGGINGFACE, LOCAL, and SPACES
-- **MCPFluxWrapper**: Integrates Flux text-to-image models through MCP
-- **Configuration options**: MCP endpoint and API key support
-- **Server mode**: Gradio interface can run as an MCP server
-
-## Project Architecture
-
-### Backend
-- **Unified app.py**: FastAPI server on port 7861 with all features enabled
-- **Gradio Admin**: Running on port 7860 as fallback/admin interface
-- **WebSocket Support**: Real-time updates for stats and model changes
-
-### Frontend
-- **SvelteKit Application**: Located in `/frontend` directory
-- **Voice-First UI**: DigiVice-style interface with voice commands
-- **3D Rendering**: Using Threlte for monster visualization
-- **Cyberpunk-Retro Theme**: Custom styling with neon effects
-
-See [CLAUDE_SVELTE_FRONTEND_GUIDE.md](CLAUDE_SVELTE_FRONTEND_GUIDE.md) for detailed frontend documentation.
````
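The "real-time stat degradation continues even when the application is offline" behaviour that the deleted CLAUDE.md describes follows from its State Management note: stat updates are derived from the timestamp of the last interaction rather than a running timer. A minimal sketch of that pattern, with illustrative stat names and decay rates rather than the actual values from the deleted `monster_engine.py`:

```python
from datetime import datetime, timezone

# Illustrative decay rates, in stat points per hour offline (not the project's values).
DECAY_PER_HOUR = {"hunger": 4.0, "energy": 2.0, "happiness": 1.5, "cleanliness": 1.0}

def apply_offline_decay(stats: dict, last_interaction: datetime) -> dict:
    """Degrade stats based on wall-clock time since the last interaction.

    `last_interaction` must be timezone-aware. Because decay is derived
    from a persisted timestamp, it applies even if the process was
    stopped the whole time.
    """
    elapsed_hours = (datetime.now(timezone.utc) - last_interaction).total_seconds() / 3600
    for stat, rate in DECAY_PER_HOUR.items():
        stats[stat] = max(0.0, stats[stat] - rate * elapsed_hours)
    return stats
```

Computing decay lazily at load time is what lets the monster "live" between sessions without any background process.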
DEPLOYMENT.md DELETED
````diff
@@ -1,213 +0,0 @@
-# DigiPal Deployment Guide
-
-## Quick Start
-
-### Prerequisites
-- Python 3.11+
-- Node.js 18+ (for Svelte frontend, if using)
-- Git
-
-### Installation
-
-1. **Clone the repository:**
-```bash
-git clone <repository-url>
-cd digiPal
-```
-
-2. **Install Python dependencies:**
-```bash
-pip install -r requirements.txt
-```
-
-3. **Set up environment variables (optional):**
-```bash
-export HF_TOKEN="your_huggingface_token"  # For private models/spaces
-export MCP_ENDPOINT="your_mcp_endpoint"   # For MCP integration
-export MCP_API_KEY="your_mcp_key"
-```
-
-### Running DigiPal
-
-#### Option 1: Complete Application (Recommended)
-```bash
-python run_digipal.py
-```
-This starts both the FastAPI backend and Streamlit frontend.
-
-**Access:**
-- **Streamlit UI**: http://localhost:8501
-- **API Backend**: http://localhost:7861
-
-#### Option 2: Manual Startup
-Terminal 1 (Backend):
-```bash
-python app.py
-```
-
-Terminal 2 (Frontend):
-```bash
-streamlit run src/ui/streamlit_interface.py
-```
-
-#### Option 3: Svelte Frontend (Advanced)
-```bash
-# Terminal 1: Start backend
-python app.py
-
-# Terminal 2: Start Svelte frontend
-cd frontend
-npm install
-npm run dev
-```
-
-## Architecture Overview
-
-### Technology Stack
-- **Frontend**: Streamlit (modern cyberpunk UI)
-- **Backend**: FastAPI with WebSocket support
-- **AI Models**:
-  - Qwen 2.5-1.5B-Instruct (conversations)
-  - Kyutai STT-2.6b-en (speech recognition)
-- **3D Pipeline**: OmniGen2 → Hunyuan3D-2.1 → UniRig
-- **Database**: SQLite with async operations
-
-### API Endpoints
-
-**Monster Management:**
-- `GET /api/monsters` - List all monsters
-- `POST /api/monsters` - Create new monster
-- `GET /api/monsters/{id}` - Get monster details
-- `POST /api/monsters/{id}/action` - Perform care action
-- `POST /api/monsters/{id}/talk` - Send message to monster
-- `POST /api/monsters/{id}/generate-3d` - Generate 3D model
-
-**WebSocket:**
-- `WS /api/monsters/{id}/ws` - Real-time updates
-
-## Configuration
-
-### Environment Variables
-
-| Variable | Description | Default |
-|----------|-------------|---------|
-| `LOG_LEVEL` | Logging level | `INFO` |
-| `API_PORT` | FastAPI backend port | `7861` |
-| `HF_TOKEN` | HuggingFace API token | None |
-| `MCP_ENDPOINT` | MCP service endpoint | None |
-| `MCP_API_KEY` | MCP API key | None |
-
-### Hardware Requirements
-
-**Minimum:**
-- 8GB RAM
-- 4GB free disk space
-- Internet connection (for HuggingFace APIs)
-
-**Recommended:**
-- 16GB RAM
-- NVIDIA GPU with 8GB+ VRAM
-- SSD storage
-- High-speed internet
-
-## 3D Generation Pipeline
-
-The application uses a modern 3D generation pipeline:
-
-1. **Text Input** → User describes their monster
-2. **OmniGen2** → Generates multi-view images
-3. **Hunyuan3D-2.1** → Converts images to 3D mesh
-4. **UniRig** → Automatically rigs the 3D model
-5. **Output** → Fully rigged 3D model ready for animation
-
-### API Integration
-- **OmniGen2**: Via transformers/diffusers pipeline
-- **Hunyuan3D-2.1**: Via official HuggingFace Space API
-- **UniRig**: Via HuggingFace model repository
-
-## Deployment Options
-
-### Local Development
-Use the quick start guide above.
-
-### Docker (Future)
-```bash
-docker build -t digipal .
-docker run -p 7861:7861 -p 8501:8501 digipal
-```
-
-### HuggingFace Spaces
-1. Fork/upload repository to HuggingFace Spaces
-2. Set Space type to "Streamlit"
-3. Configure secrets for HF_TOKEN if needed
-4. Space will auto-deploy
-
-## Troubleshooting
-
-### Common Issues
-
-**Port Already in Use:**
-```bash
-# Change ports
-API_PORT=8081 python app.py
-streamlit run src/ui/streamlit_interface.py --server.port 8502
-```
-
-**Missing Dependencies:**
-```bash
-pip install -r requirements.txt --upgrade
-```
-
-**3D Generation Fails:**
-- Check internet connection
-- Verify HF_TOKEN if using private models
-- Pipeline includes fallback mechanisms
-
-**Streamlit Not Starting:**
-```bash
-pip install streamlit --upgrade
-streamlit --version
-```
-
-### Performance Optimization
-
-**For GPU Systems:**
-- Ensure CUDA is properly installed
-- Models will automatically use GPU when available
-
-**For CPU-Only Systems:**
-- Increase timeout values for 3D generation
-- Consider using smaller model variants
-
-## Monitoring
-
-### Logs
-- Application logs: `logs/digipal.log`
-- Streamlit logs: Console output
-- FastAPI logs: Console output with timestamps
-
-### Health Check
-```bash
-curl http://localhost:7861/health
-```
-
-## Support
-
-For issues and questions:
-1. Check this deployment guide
-2. Review `CLAUDE.md` for development details
-3. Check console logs for error messages
-
-## New Tech Stack Summary
-
-**Replaced:**
-- Gradio → Streamlit (modern UI)
-- Faster Whisper → Kyutai STT-2.6b-en (better accuracy)
-- Complex 3D pipeline → Streamlined OmniGen2→Hunyuan3D→UniRig
-
-**Benefits:**
-- Modern, responsive UI with cyberpunk theme
-- Better speech recognition quality
-- State-of-the-art 3D generation pipeline
-- Simplified deployment and maintenance
-- Better separation of frontend/backend
````
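The endpoint list in the deleted DEPLOYMENT.md maps directly onto the request models visible in app.py later in this diff (`name`/`personality` for creation, `message` for talk). A hedged sketch of driving the API with `requests`; the response fields used here (`id`) appear in the old app.py handlers, but treat the exact shapes as assumptions:

```python
import requests

BASE = "http://localhost:7861"  # default API_PORT from the deleted docs

# Create a monster (fields match CreateMonsterRequest in app.py).
resp = requests.post(f"{BASE}/api/monsters",
                     json={"name": "Spark", "personality": "friendly"})
resp.raise_for_status()
monster = resp.json()

# Send it a message (MonsterTalkRequest carries a single `message` field).
reply = requests.post(f"{BASE}/api/monsters/{monster['id']}/talk",
                      json={"message": "Hello!"})
print(reply.json())
```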
Dockerfile DELETED
```diff
@@ -1,38 +0,0 @@
-FROM python:3.11-slim
-
-# Set environment variables
-ENV PYTHONUNBUFFERED=1
-ENV PYTHONDONTWRITEBYTECODE=1
-ENV TRANSFORMERS_CACHE=/app/data/cache
-ENV HF_HOME=/app/data/cache
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y \
-    git \
-    ffmpeg \
-    libsndfile1 \
-    curl \
-    && rm -rf /var/lib/apt/lists/*
-
-# Set working directory
-WORKDIR /app
-
-# Copy requirements and install Python dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy application code
-COPY . .
-
-# Create necessary directories
-RUN mkdir -p data/saves data/models data/cache logs config
-
-# Expose ports for FastAPI backend and Streamlit frontend
-EXPOSE 7861 8501
-
-# Health check - check API server on port 7861
-HEALTHCHECK --interval=30s --timeout=30s --start-period=60s --retries=3 \
-    CMD curl -f http://localhost:7861/health || exit 1
-
-# Run the complete application (FastAPI + Streamlit)
-CMD ["python", "run_digipal.py"]
```
PROJECT_ARCHITECTURE.md DELETED
````diff
@@ -1,367 +0,0 @@
-# DigiPal Project Architecture & Documentation
-
-## 🏗️ System Overview
-
-DigiPal is a unified AI monster companion application that combines multiple technologies into a single, cohesive system. The architecture is designed to be modular, scalable, and deployable on Hugging Face Spaces with Zero GPU support.
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│ DIGIPAL ECOSYSTEM │
-├─────────────────────────────────────────────────────────────────┤
-│ │
-│ ┌─────────────────┐ ┌─────────────────┐ ┌──────────────┐ │
-│ │ Svelte UI │ │ FastAPI │ │ Gradio │ │
-│ │ (Frontend) │◄──►│ (Backend) │◄──►│ (Admin) │ │
-│ │ Port: 5173 │ │ Port: 7861 │ │ Port: 7860 │ │
-│ └─────────────────┘ └─────────────────┘ └──────────────┘ │
-│ │ │ │ │
-│ └───────────────────────┼───────────────────────┘ │
-│ │ │
-│ ┌─────────────────────────────────┼─────────────────────────────┐ │
-│ │ CORE SYSTEMS │ │
-│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │
-│ │ │ Monster │ │ Evolution │ │ State │ │ │
-│ │ │ Engine │ │ System │ │ Manager │ │ │
-│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │
-│ └─────────────────────────────────────────────────────────────┘ │
-│ │ │
-│ ┌─────────────────────────────────┼─────────────────────────────┐ │
-│ │ AI SYSTEMS │ │
-│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │
-│ │ │ Qwen │ │ Speech │ │ 3D │ │ │
-│ │ │ Processor │ │ Engine │ │ Generation │ │ │
-│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │
-│ └─────────────────────────────────────────────────────────────┘ │
-│ │ │
-│ ┌─────────────────────────────────┼─────────────────────────────┐ │
-│ │ DEPLOYMENT & OPTIMIZATION │ │
-│ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │
-│ │ │ Zero GPU │ │ Performance │ │ Spaces │ │ │
-│ │ │ Optimizer │ │ Tracker │ │ Integration │ │ │
-│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │
-│ └─────────────────────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────────────────────┘
-```
-
-## 🚀 How It All Works Together
-
-### 1. **Unified Entry Point: `app.py`**
-The main application orchestrates everything:
-
-```python
-# app.py - The Master Controller
-├── FastAPI Server (Port 7861) - REST API & WebSocket
-├── Gradio Interface (Port 7860) - Admin Panel & Fallback UI
-└── Svelte Frontend (Port 5173) - Modern Web Interface
-```
-
-**Key Features:**
-- **Single Command**: `python app.py` starts everything
-- **Threading**: Gradio runs in separate thread, FastAPI in main thread
-- **Unified State**: All components share the same monster data
-- **Zero GPU Ready**: Automatically detects and optimizes for Hugging Face Spaces
-
-### 2. **Component Breakdown**
-
-#### **Frontend Layer**
-```
-Svelte UI (Port 5173)
-├── Modern, responsive web interface
-├── Real-time monster interactions
-├── 3D model viewer
-├── Voice chat interface
-└── Mini-games and training
-```
-
-#### **Backend Layer**
-```
-FastAPI (Port 7861)
-├── REST API endpoints
-├── WebSocket connections
-├── Monster management
-├── AI processing coordination
-└── 3D generation requests
-```
-
-#### **Admin Layer**
-```
-Gradio (Port 7860)
-├── Admin panel for debugging
-├── Fallback UI if Svelte fails
-├── System monitoring
-├── Direct monster creation
-└── Performance metrics
-```
-
-## 🔧 Core Systems Architecture
-
-### **Monster Engine** (`src/core/monster_engine.py`)
-```python
-class Monster:
-├── Stats (health, happiness, hunger, energy)
-├── Personality (traits, relationship level)
-├── Lifecycle (age, stage, evolution)
-├── Conversation history
-└── 3D model data
-```
-
-### **Evolution System** (`src/core/evolution_system.py`)
-```python
-class EvolutionSystem:
-├── Evolution requirements
-├── Stage progression logic
-├── Special evolution conditions
-└── Evolution history tracking
-```
-
-### **State Management** (`src/ui/state_manager.py`)
-```python
-class AdvancedStateManager:
-├── SQLite database persistence
-├── Monster save/load operations
-├── Conversation history storage
-└── Performance metrics tracking
-```
-
-## 🤖 AI Systems Architecture
-
-### **Qwen Processor** (`src/ai/qwen_processor.py`)
-```python
-class QwenProcessor:
-├── Text generation for monster responses
-├── Personality-aware conversations
-├── Emotional impact calculation
-└── Fallback responses when AI unavailable
-```
-
-### **Speech Engine** (`src/ai/speech_engine.py`)
-```python
-class AdvancedSpeechEngine:
-├── Voice-to-text processing
-├── Text-to-speech synthesis
-├── Real-time audio streaming
-└── Multiple language support
-```
-
-### **3D Generation** (`src/pipelines/`)
-```python
-# Multiple 3D generation options:
-├── hunyuan3d_pipeline.py - Tencent's Hunyuan3D
-├── opensource_3d_pipeline_v2.py - Production pipeline
-└── Integration with Hugging Face Spaces
-```
-
-## 🚀 Deployment Architecture
-
-### **Local Development**
-```
-┌─────────────┐ ┌─────────────┐ ┌─────────────┐
-│ Svelte │ │ FastAPI │ │ Gradio │
-│ (5173) │◄──►│ (7861) │◄──►│ (7860) │
-└─────────────┘ └─────────────┘ └─────────────┘
-```
-
-### **Hugging Face Spaces Deployment**
-```
-┌─────────────────────────────────────────────────┐
-│ HUGGING FACE SPACES │
-│ ┌─────────────┐ ┌─────────────┐ │
-│ │ Gradio │ │ FastAPI │ │
-│ │ (7860) │◄──►│ (7861) │ │
-│ └─────────────┘ └─────────────┘ │
-│ │ │ │
-│ └─────────────────────┘ │
-│ │
-│ ┌─────────────────────────────────────────────┐ │
-│ │ ZERO GPU OPTIMIZATION │ │
-│ │ • Dynamic GPU allocation │ │
-│ │ • CPU fallback for AI models │ │
-│ │ • Memory optimization │ │
-│ │ • Spaces.GPU decorators │ │
-│ └─────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────┘
-```
-
-## 🔄 Data Flow
-
-### **Monster Creation Flow**
-```
-1. User Input (Svelte/Gradio)
-
-2. FastAPI Endpoint (/api/monsters)
-
-3. Monster Engine (Create Monster)
-
-4. State Manager (Save to Database)
-
-5. Response (Monster Data + 3D Model)
-```
-
-### **Conversation Flow**
-```
-1. User Message (Text/Voice)
-
-2. Speech Engine (if voice)
-
-3. Qwen Processor (AI Response)
-
-4. Monster State Update
-
-5. State Manager (Save)
-
-6. WebSocket Update (Real-time)
-```
-
-### **3D Generation Flow**
-```
-1. Monster Creation/Evolution
-
-2. 3D Pipeline Selection
-
-3. Multi-view Image Generation
-
-4. 3D Model Creation
-
-5. Texture Generation
-
-6. Model Optimization
-
-7. Database Storage
-```
-
-## 🎯 Key Features by Component
-
-### **Svelte Frontend**
-- ✅ Modern, responsive UI
-- ✅ Real-time WebSocket updates
-- ✅ Voice chat interface
-- ✅ 3D model viewer
-- ✅ Mini-games
-- ✅ Mobile-friendly design
-
-### **FastAPI Backend**
-- ✅ RESTful API endpoints
-- ✅ WebSocket real-time updates
-- ✅ Monster CRUD operations
-- ✅ AI processing coordination
-- ✅ 3D generation requests
-- ✅ Performance monitoring
-
-### **Gradio Admin**
-- ✅ Admin panel interface
-- ✅ System monitoring
-- ✅ Direct monster management
-- ✅ Fallback UI
-- ✅ Debugging tools
-
-### **AI Systems**
-- ✅ Qwen 2.5 text generation
-- ✅ Speech-to-text processing
-- ✅ Text-to-speech synthesis
-- ✅ Multiple 3D generation pipelines
-- ✅ Fallback responses
-
-### **Core Systems**
-- ✅ Monster lifecycle management
-- ✅ Evolution system
-- ✅ Personality simulation
-- ✅ State persistence
-- ✅ Performance tracking
-
-## 🚀 Getting Started
-
-### **Local Development**
-```bash
-# 1. Install dependencies
-pip install -r requirements.txt
-
-# 2. Start the application
-python app.py
-
-# 3. Access interfaces:
-# - Svelte UI: http://localhost:5173
-# - API: http://localhost:7861
-# - Gradio Admin: http://localhost:7860
-```
-
-### **Hugging Face Spaces**
-```bash
-# 1. Push to repository
-git push origin main
-
-# 2. Spaces automatically deploys
-# 3. Access via Spaces URL
-```
-
-## 🔧 Configuration
-
-### **Environment Variables**
-```bash
-# Server Configuration
-SERVER_NAME=0.0.0.0
-SERVER_PORT=7860
-API_PORT=7861
-
-# AI Configuration
-MCP_ENDPOINT=your_mcp_endpoint
-MCP_API_KEY=your_api_key
-
-# Performance
-MAX_THREADS=40
-LOG_LEVEL=INFO
-```
-
-### **Zero GPU Optimization**
-```python
-# Automatic detection and optimization
-├── GPU available → Use CUDA
-├── CPU only → Optimize for CPU
-├── Memory constraints → Load smaller models
-└── Spaces environment → Apply @spaces.GPU decorators
-```
-
-## 📊 Performance Monitoring
-
-### **Metrics Tracked**
-- Total interactions
-- Average response time
-- User satisfaction
-- AI model performance
-- 3D generation success rate
-- Memory usage
-- GPU utilization
-
-### **Optimization Features**
-- Dynamic model loading
-- Memory-efficient processing
-- Caching strategies
-- Background updates
-- Graceful fallbacks
-
-## 🎯 Why This Architecture?
-
-### **Unified Experience**
-- Single entry point (`app.py`)
-- Shared state across all components
-- Consistent monster data
-- Real-time synchronization
-
-### **Scalability**
-- Modular component design
-- Independent scaling of services
-- Load balancing ready
-- Cloud deployment optimized
-
-### **Reliability**
-- Multiple UI options (Svelte + Gradio)
-- Fallback mechanisms
-- Error handling
-- Graceful degradation
-
-### **Developer Experience**
-- Clear separation of concerns
-- Well-documented APIs
-- Easy testing
-- Hot reloading support
-
-This architecture ensures that DigiPal is both powerful and maintainable, with all the advanced features you've requested while keeping the codebase organized and easy to understand.
````
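The real-time update path that this deleted document describes (the `WS /api/monsters/{id}/ws` route from DEPLOYMENT.md, served by `ConnectionManager.send_update` in app.py) can be exercised with a few lines of client code. A sketch using the third-party `websockets` package; the URL shape comes from the deleted docs, the payload shape is an assumption:

```python
import asyncio
import json
import websockets  # pip install websockets

async def watch_monster(monster_id: str) -> None:
    uri = f"ws://localhost:7861/api/monsters/{monster_id}/ws"
    async with websockets.connect(uri) as ws:
        # The server pushes JSON stat/model updates via ConnectionManager.send_update().
        async for raw in ws:
            update = json.loads(raw)
            print("update:", update)

asyncio.run(watch_monster("some-monster-id"))
```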
QUICK_UI_TEST.md DELETED
````diff
@@ -1,63 +0,0 @@
-# 🎨 Quick UI Test Guide
-
-## See the New DigiPal UI Now!
-
-### Option 1: UI Only Preview (Fastest)
-```bash
-python test_ui.py
-```
-This shows you the new cyberpunk Streamlit interface without needing the backend.
-
-### Option 2: Full Application
-```bash
-python run_digipal.py
-```
-This runs both backend and frontend for full functionality.
-
-## What You'll See
-
-### 🎨 **Modern Cyberpunk Theme:**
-- Dark gradient backgrounds with neon accents
-- Glowing cyan and magenta color scheme
-- Orbitron and Rajdhani fonts for sci-fi feel
-- Animated neon effects on titles and buttons
-
-### 🖥️ **Interface Features:**
-- **Welcome Screen**: Feature overview with holographic styling
-- **Sidebar**: Monster management with neon buttons
-- **Monster Stats**: Holographic containers with progress bars
-- **Chat Interface**: Cyberpunk-styled conversation area
-- **3D Generation**: Modern controls for model creation
-
-### 🚀 **Interactive Elements:**
-- Hover effects on buttons with glow animations
-- Gradient backgrounds that shift and pulse
-- Neon text effects with shadows
-- Holographic containers with backdrop blur
-
-## Access URLs
-
-After starting:
-- **Streamlit UI**: http://localhost:8501
-- **API Backend**: http://localhost:7861 (if running full app)
-
-## Notes
-
-- The UI test mode shows the interface but backend features won't work
-- Create a monster in the sidebar to see the full interface
-- All the cyberpunk styling and animations will be visible
-- The design is optimized for both desktop and tablet viewing
-
-## Troubleshooting
-
-**If Streamlit won't start:**
-```bash
-pip install streamlit --upgrade
-```
-
-**If you see port conflicts:**
-```bash
-STREAMLIT_PORT=8502 python test_ui.py
-```
-
-Enjoy the new futuristic DigiPal experience! 🐉✨
````
README.md CHANGED
````diff
@@ -1,127 +1,197 @@
 ---
-title: DigiPal Advanced Monster Companion
-emoji: 🐉
+title: DigiPal - AI Monster Companion
+emoji: 🤖
 colorFrom: purple
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: streamlit_app.py
+colorTo: green
+sdk: gradio
+sdk_version: 4.16.0
+app_file: app.py
 pinned: false
 license: mit
+hardware: zero-gpu
+hf_oauth: true
+hf_oauth_scopes:
+  - read-repos
+  - write-repos
+hf_oauth_expiration_minutes: 480
 models:
-  - Qwen/Qwen2.5-1.5B-Instruct
   - kyutai/stt-2.6b-en
-  - shitao/OmniGen-v1
+  - Qwen/Qwen2.5-0.5B-Instruct
+  - OmniGen2/OmniGen2
   - tencent/Hunyuan3D-2.1
-  - VAST-AI/UniRig
-datasets: []
 tags:
-  - gaming
+  - game
+  - 3d
+  - voice
+  - monster
   - ai-companion
-  - monster-raising
-  - conversation
-  - speech-recognition
-  - 3d-generation
-  - text-to-3d
-  - cyberpunk
-  - streamlit
-suggested_hardware: zero-a10g
-suggested_storage: medium
+  - digital-pet
 ---
 
-# 🐉 DigiPal - Advanced AI Monster Companion
+# 🤖 DigiPal: AI-Powered Digital Monster Companion
 
-**The most advanced AI-powered virtual monster companion with cutting-edge 3D generation!**
+<div align="center">
+
+[![Hugging Face Spaces](https://img.shields.io/badge/🤗%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/your-username/DigiPal)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![Python 3.10+](https://img.shields.io/badge/python-3.10+-blue.svg)](https://www.python.org/downloads/)
 
-## 🚀 Revolutionary Features
+[Demo](https://huggingface.co/spaces/your-username/DigiPal) | [Report Bug](https://github.com/your-username/DigiPal/issues) | [Request Feature](https://github.com/your-username/DigiPal/issues)
 
-- 🤖 **Advanced AI Conversations** with Qwen 2.5-1.5B-Instruct
-- 🎤 **High-Quality Speech Recognition** with Kyutai STT-2.6b-en
-- 🎨 **State-of-the-Art 3D Generation** via OmniGen2 → Hunyuan3D-2.1 → UniRig
-- 📊 **Complex Care System** inspired by Digimon World mechanics
-- 🧬 **Dynamic Evolution** based on care quality and interaction
-- 💬 **Personality-Driven Responses** with emotional intelligence
-- 🎮 **Cyberpunk UI** with neon effects and holographic styling
+</div>
 
-## 🛠️ Technology Stack
+## 🌟 Overview
 
-### AI Models
-- **Conversations**: Qwen 2.5-1.5B-Instruct (quantized for efficiency)
-- **Speech-to-Text**: Kyutai STT-2.6b-en (latest multilingual model)
-- **Text-to-Image**: OmniGen2 (multi-view generation)
-- **Image-to-3D**: Hunyuan3D-2.1 (official Tencent model)
-- **3D Rigging**: UniRig (automatic model rigging)
+DigiPal brings the nostalgic charm of digital pet games into the AI era. Create, train, and evolve unique digital monsters using cutting-edge AI models. Inspired by classic games like Digimon World, DigiPal combines voice interaction, real-time 3D generation, and engaging gameplay mechanics.
 
-### Architecture
-- **Frontend**: Streamlit with cyberpunk theme
-- **Backend**: Integrated FastAPI services
-- **Database**: SQLite with async operations
-- **3D Pipeline**: Complete text → image → 3D → rigged workflow
+### ✨ Key Features
 
-## 🎯 3D Generation Pipeline
+- 🎙️ **Voice Interaction**: Create monsters by describing them with your voice
+- 🖼️ **AI-Powered Generation**: Unique monster designs generated in real-time
+- 🦾 **3D Models**: Automatic conversion from 2D to rigged 3D models
+- 🎮 **Classic Gameplay**: Training, evolution, and care mechanics
+- 💾 **Persistent Storage**: Your monsters are saved across sessions
+- 🤖 **Emoji Communication**: Monsters speak in emojis and numbers
+- 🌐 **Zero GPU Optimized**: Runs efficiently on HuggingFace Spaces
 
-The crown jewel of DigiPal is its revolutionary 3D generation system:
+## 🚀 Getting Started
 
-1. **Text Description** → User describes their monster
-2. **OmniGen2** → Generates consistent multi-view images
-3. **Hunyuan3D-2.1** → Converts images to high-quality 3D mesh
-4. **UniRig** → Automatically rigs the model for animation
-5. **Result** → Fully rigged 3D model ready for games/animation
+### Online Demo
 
-## 🎮 How to Use
+Visit our [HuggingFace Space](https://huggingface.co/spaces/your-username/DigiPal) to try DigiPal instantly!
 
-1. **Create Your Monster**: Choose name and personality type
-2. **Care & Interact**: Feed, train, play, and talk with your companion
-3. **Watch Evolution**: Your monster grows based on care quality
-4. **Generate 3D Model**: Create a unique 3D representation
-5. **Download & Use**: Get your rigged model for other applications
+### Local Installation
 
-## 🎨 Monster Care System
+```bash
+# Clone the repository
+git clone https://github.com/your-username/DigiPal.git
+cd DigiPal
 
-- **Six Core Stats**: Health, Happiness, Hunger, Energy, Discipline, Cleanliness
-- **Real-Time Degradation**: Stats change even when you're away
-- **Evolution Stages**: Egg → Baby → Child → Adult → Champion → Ultimate
-- **Personality Types**: Friendly, Energetic, Calm, Curious, Brave
-- **Complex Requirements**: Age, level, care quality all matter
+# Install dependencies
+pip install -r requirements.txt
 
-## 💫 Technical Highlights
+# Run the application
+python app.py
+```
 
-- **Zero GPU Optimization**: Efficient model loading and inference
-- **Graceful Fallbacks**: Pipeline continues even if some APIs fail
-- **Real-Time Updates**: WebSocket integration for live stat changes
-- **Model Caching**: Intelligent reuse of generated assets
-- **Cross-Platform**: Works on desktop, tablet, and mobile
+## 🎮 How to Play
 
-## 🔧 Development
+### 1. Create Your Monster
 
-### Local Setup
-```bash
-git clone <repository>
-cd digiPal
-pip install -r requirements.txt
+**Voice Control:**
+- Click the microphone button
+- Describe your ideal monster
+- Example: "Create a fire-breathing dragon with blue scales"
 
-# Run complete application
-python run_digipal.py
+**Visual Control:**
+- Upload reference images
+- Draw your monster concept
+- Use the camera for real-world inspiration
 
-# Or run Streamlit only
-streamlit run streamlit_app.py
-```
+### 2. Train & Evolve
 
-### Environment Variables
-```bash
-HF_TOKEN=your_token        # For private models
-MCP_ENDPOINT=your_endpoint # For MCP integration
-LOG_LEVEL=INFO             # Logging level
-```
+- Choose training types: Strength, Defense, Speed, Intelligence
+- Complete training sessions to improve stats
+- Meet evolution requirements to unlock new forms
+
+### 3. Care System
+
+- **Feed**: Keep hunger above 30%
+- **Play**: Maintain happiness above 40%
+- **Rest**: Manage fatigue levels
+- **Medicine**: Heal when health drops
+
+### 4. Monster Communication
+
+Your monster communicates using emojis and numbers:
+- 🤖💚 = Happy state
+- 🍖❓ = Hungry
+- 😴💤 = Tired
+- Numbers represent HP and happiness percentages
+
+## 🏗️ Architecture
+
+### AI Pipeline
+
+1. **Speech Recognition**: Kyutai STT for voice commands
+2. **Text Generation**: Qwen2.5 for monster traits
+3. **Image Generation**: OmniGen2 for visual creation
+4. **3D Conversion**: Hunyuan3D for model generation
+5. **Rigging**: Automatic skeleton and animation
+
+### Technology Stack
 
-## 📝 License
+- **Frontend**: Gradio with cyberpunk theming
+- **Backend**: Python with HuggingFace Spaces
+- **AI Models**: State-of-the-art transformers
+- **Storage**: Persistent HF Spaces storage
+- **Optimization**: Zero GPU with intelligent fallbacks
 
-MIT License - Feel free to use, modify, and distribute!
+## 📊 Game Mechanics
+
+### Stats System
+- **HP**: Health points (10-999)
+- **Attack**: Offensive power (5-500)
+- **Defense**: Defensive capability (5-500)
+- **Speed**: Movement and reaction (5-500)
+- **Special**: Magic/unique abilities (5-500)
+
+### Evolution Stages
+1. **Rookie**: Starting form
+2. **Champion**: First evolution (150+ total stats)
+3. **Ultimate**: Advanced form (300+ total stats)
+4. **Mega**: Final evolution (500+ total stats)
+
+### Personality Types
+- Brave, Timid, Aggressive, Gentle
+- Playful, Serious, Loyal, Independent
+- Each affects training preferences and dialogue
+
+## 🛠️ Advanced Features
+
+### Custom Modifications
+
+Modify `core/game_mechanics.py` to:
+- Add new evolution paths
+- Create custom training types
+- Implement new care mechanics
+
+### Model Swapping
+
+Replace AI models in `models/` directory:
+- Use different STT models
+- Try alternative image generators
+- Experiment with 3D converters
 
 ## 🤝 Contributing
 
-Contributions welcome! This project pushes the boundaries of AI companions and 3D generation.
+We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+
+1. Fork the repository
+2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
+3. Commit changes (`git commit -m 'Add AmazingFeature'`)
+4. Push to branch (`git push origin feature/AmazingFeature`)
+5. Open a Pull Request
+
+## 📄 License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+## 🙏 Acknowledgments
+
+- Inspired by Digimon World series
+- Built with HuggingFace ecosystem
+- Community feedback and contributions
+- Open-source AI model creators
+
+## 📞 Contact & Support
+
+- **Issues**: [GitHub Issues](https://github.com/your-username/DigiPal/issues)
+- **Discussions**: [HuggingFace Community](https://huggingface.co/spaces/your-username/DigiPal/discussions)
+- **Email**: your-email@example.com
 
 ---
 
-*Experience the future of AI companions with DigiPal! 🐉✨*
+<div align="center">
+Made with ❤️ by the DigiPal Team
+</div>
````
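The new README's evolution thresholds map naturally onto a single total-stats check. A minimal sketch of that stage table; the project's actual logic lives in `core/game_mechanics.py` per the README and may differ, so treat this as an illustration only:

```python
def evolution_stage(stats: dict) -> str:
    """Map total stats to the stage names from the README (Rookie → Mega)."""
    total = sum(stats[k] for k in ("hp", "attack", "defense", "speed", "special"))
    if total >= 500:
        return "Mega"
    if total >= 300:
        return "Ultimate"
    if total >= 150:
        return "Champion"
    return "Rookie"

# 250 total stats lands in the Champion band (>= 150, < 300).
print(evolution_stage({"hp": 120, "attack": 40, "defense": 35, "speed": 30, "special": 25}))
```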
app.py CHANGED
```diff
@@ -1,468 +1,419 @@
-"""
-DigiPal - Advanced AI Monster Companion with 3D Generation
-Unified application with all features enabled by default
-"""
-
-import asyncio
-import json
-import logging
 import os
-import sys
-from pathlib import Path
-from typing import Dict, Any, Optional, List
-from datetime import datetime
-import uvicorn
-from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from pydantic import BaseModel
 import torch
-from contextlib import asynccontextmanager
-
-# Add src to path
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))
-
-# Configure logging
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# Environment configuration - All features enabled by default
-ENV_CONFIG = {
-    "LOG_LEVEL": os.getenv("LOG_LEVEL", "INFO"),
-    "SERVER_NAME": os.getenv("SERVER_NAME", "0.0.0.0"),
-    "STREAMLIT_PORT": int(os.getenv("STREAMLIT_PORT", "8501")),
-    "API_PORT": int(os.getenv("API_PORT", "7861")),
-    "SHARE": os.getenv("SHARE", "false").lower() == "true",
-    "DEBUG": os.getenv("DEBUG", "false").lower() == "true",
-    "MAX_THREADS": int(os.getenv("MAX_THREADS", "40")),
-    "MCP_ENDPOINT": os.getenv("MCP_ENDPOINT", ""),
-    "MCP_API_KEY": os.getenv("MCP_API_KEY", ""),
-    "HF_TOKEN": os.getenv("HF_TOKEN", "")
-}
-
-# HuggingFace Spaces detection
-IS_SPACES = os.getenv("SPACE_ID") is not None
-
-# API Models
-class CreateMonsterRequest(BaseModel):
-    name: str
-    personality: str
-
-class MonsterActionRequest(BaseModel):
-    action: str
-    params: Dict[str, Any] = {}
-
-class MonsterTalkRequest(BaseModel):
-    message: str
-
-class Generate3DRequest(BaseModel):
-    description: Optional[str] = None
-
-# Import core modules after environment setup
 try:
-    from src.core.monster_engine import Monster, MonsterPersonalityType as PersonalityType
-    from src.core.evolution_system import EvolutionSystem
-    from src.ai.qwen_processor import QwenProcessor, ModelConfig
-    from src.ai.speech_engine import AdvancedSpeechEngine as SpeechEngine, SpeechConfig
-    from src.ui.state_manager import AdvancedStateManager as StateManager
-    from src.deployment.zero_gpu_optimizer import get_optimal_device
-    from src.pipelines.opensource_3d_pipeline_v2 import (
-        ProductionPipeline,
-        ProductionConfig
-    )
-
-    # UI imports - now using Streamlit (separate process)
-    # from src.ui.streamlit_interface import main as streamlit_main
-except ImportError as e:
-    logger.error(f"Failed to import required modules: {e}")
-    sys.exit(1)
-
-# Initialize FastAPI app
-app = FastAPI(title="DigiPal API", version="1.0.0")
-
-# Add CORS middleware for frontend communication
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],  # In production, replace with specific origins
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-# Global state management
-class AppState:
-    def __init__(self):
-        self.monsters: Dict[str, Monster] = {}
-        self.state_manager = StateManager()
-        self.qwen_processor = None
-        self.speech_engine = None
-        self.evolution_system = EvolutionSystem()
-        self.pipeline_3d = None
-        self.active_connections: Dict[str, WebSocket] = {}
-        self.initialized = False
-
-    async def initialize(self):
-        """Initialize AI components and pipelines"""
-        if self.initialized:
-            return
-
-        logger.info("Initializing AI components...")
-
-        # Initialize AI processors
-        try:
-            # Create Qwen processor config based on available resources
-            qwen_config = ModelConfig(
-                model_name="Qwen/Qwen2.5-1.5B-Instruct",  # Smaller model for Spaces
-                max_memory_gb=4.0,  # Conservative memory usage
-                inference_speed="fast",  # Fast inference for Spaces
-                use_quantization=True,
-                use_flash_attention=True
-            )
-
-            self.qwen_processor = QwenProcessor(qwen_config)
-
-            # Create speech engine config for Kyutai STT
-            speech_config = SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",  # Kyutai STT model
-                device="auto",  # Auto-detect device
-                torch_dtype="float32",  # Use float32 for better compatibility
-                use_vad=True,
-                vad_aggressiveness=2,
-                use_pipeline=True  # Use pipeline for easier integration
-            )
-
-            self.speech_engine = SpeechEngine(speech_config)
-
-            # Initialize 3D pipeline
-            logger.info("Using production pipeline for 3D generation")
-            pipeline_config = ProductionConfig(
-                hf_token=ENV_CONFIG.get("HF_TOKEN"),  # Use proper HF_TOKEN
-                device="cuda" if torch.cuda.is_available() else "cpu"
-            )
-
-            self.pipeline_3d = ProductionPipeline(pipeline_config)
-
-            self.initialized = True
-            logger.info("All components initialized successfully")
-
-        except Exception as e:
-            logger.error(f"Failed to initialize components: {e}")
-            raise
-
-# Create global app state
-app_state = AppState()
-
-# Lifespan event handler
-@asynccontextmanager
-async def lifespan(app: FastAPI):
-    """Lifespan event handler for FastAPI"""
-    # Startup
-    await app_state.initialize()
-    yield
-    # Shutdown
-    pass
-
-# Update app with lifespan
-app.router.lifespan_context = lifespan
-
-# WebSocket connection manager
-class ConnectionManager:
-    def __init__(self):
-        self.active_connections: Dict[str, WebSocket] = {}
-
-    async def connect(self, websocket: WebSocket, monster_id: str):
-        await websocket.accept()
-        self.active_connections[monster_id] = websocket
-
-    def disconnect(self, monster_id: str):
-        if monster_id in self.active_connections:
-            del self.active_connections[monster_id]
-
-    async def send_update(self, monster_id: str, data: dict):
-        if monster_id in self.active_connections:
-            try:
-                await self.active_connections[monster_id].send_json(data)
-            except:
-                self.disconnect(monster_id)
-
-manager = ConnectionManager()
-
-# API Endpoints
-@app.get("/health")
-async def health_check():
-    """Health check endpoint"""
-    return {"status": "healthy", "initialized": app_state.initialized}
-
-@app.get("/api/monsters")
-async def list_monsters():
-    """List all available saved monsters"""
-    try:
-        saved_monsters = await app_state.state_manager.list_saved_monsters()
-        return {"monsters": saved_monsters}
-    except Exception as e:
-        logger.error(f"Error listing monsters: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/api/monsters")
-async def create_monster(request: CreateMonsterRequest):
-    """Create a new monster"""
-    try:
-        # Create new monster
-        personality = PersonalityType[request.personality.upper()]
-        monster = Monster(name=request.name, personality=personality)
-
-        # Save to state
-        app_state.monsters[monster.id] = monster
-
-        # Save to database
-        await app_state.state_manager.save_monster(monster)
-
-        return {
-            "id": monster.id,
-            "name": monster.name,
-            "personality": monster.personality.value,
-            "stage": monster.stage.value,
-            "stats": monster.get_stats()
-        }
-    except Exception as e:
-        logger.error(f"Error creating monster: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.get("/api/monsters/{monster_id}")
-async def get_monster(monster_id: str):
-    """Load a specific monster's full state"""
-    try:
-        # Check if already loaded
-        if monster_id in app_state.monsters:
-            monster = app_state.monsters[monster_id]
-        else:
-            # Load from database
-            monster = await app_state.state_manager.load_monster_by_id(monster_id)
-            if not monster:
-                raise HTTPException(status_code=404, detail="Monster not found")
-            app_state.monsters[monster_id] = monster
-
         return {
-            "id": monster.id,
-            "name": monster.name,
-            "personality": monster.personality.value,
-            "stage": monster.stage.value,
-            "stats": monster.get_stats(),
-            "model_url": monster.model_url,
-            "conversation_history": monster.conversation_history[-10:]  # Last 10 messages
         }
-    except HTTPException:
-        raise
-    except Exception as e:
-        logger.error(f"Error loading monster: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
-
-@app.post("/api/monsters/{monster_id}/action")
-async def perform_action(monster_id: str, request: MonsterActionRequest):
-    """Perform a care action on the monster"""
     try:
-        if monster_id not in app_state.monsters:
-            raise HTTPException(status_code=404, detail="Monster not found")
-
-        monster = app_state.monsters[monster_id]
-        result = {}
-
-        # Handle different actions
-        if request.action == "feed":
-            food_type = request.params.get("food_type", "balanced")
-            result = monster.feed(food_type)
-        elif request.action == "train":
-            training_type = request.params.get("training_type", "strength")
-            result = monster.train(training_type)
-        elif request.action == "play":
-            result = monster.play()
-        elif request.action == "clean":
-            result = monster.clean()
-        elif request.action == "heal":
-            result = monster.heal()
-        elif request.action == "discipline":
-            result = monster.discipline()
-        elif request.action == "rest":
-            result = monster.rest()
-        else:
-            raise HTTPException(status_code=400, detail=f"Unknown action: {request.action}")
```
292
-
293
- # Save state
294
- await app_state.state_manager.save_monster(monster)
295
 
296
- # Send real-time update
297
- await manager.send_update(monster_id, {
298
- "type": "stats_update",
299
- "stats": monster.get_stats(),
300
- "stage": monster.stage.value
301
  })
302
 
 
 
 
 
303
  return {
304
- "success": True,
305
- "result": result,
306
- "stats": monster.get_stats()
 
 
307
  }
308
- except HTTPException:
309
- raise
310
- except Exception as e:
311
- logger.error(f"Error performing action: {e}")
312
- raise HTTPException(status_code=500, detail=str(e))
313
-
314
- @app.post("/api/monsters/{monster_id}/talk")
315
- async def talk_to_monster(monster_id: str, request: MonsterTalkRequest):
316
- """Send a text message to the monster"""
317
- try:
318
- if monster_id not in app_state.monsters:
319
- raise HTTPException(status_code=404, detail="Monster not found")
320
-
321
- monster = app_state.monsters[monster_id]
322
-
323
- # Use MCP if available, otherwise use local processor
324
- if ENV_CONFIG["MCP_ENDPOINT"] and hasattr(app_state.qwen_processor, 'use_mcp'):
325
- response = await app_state.qwen_processor.generate_response_mcp(
326
- monster, request.message
327
- )
328
- else:
329
- response = app_state.qwen_processor.generate_response(
330
- monster, request.message
331
- )
332
-
333
- # Update conversation history
334
- monster.conversation_history.append({
335
- "role": "user",
336
- "content": request.message,
337
- "timestamp": datetime.now().isoformat()
338
- })
339
- monster.conversation_history.append({
340
- "role": "assistant",
341
- "content": response,
342
- "timestamp": datetime.now().isoformat()
343
- })
344
-
345
- # Save state
346
- await app_state.state_manager.save_monster(monster)
347
 
 
 
 
 
348
  return {
349
- "response": response,
350
- "stats": monster.get_stats()
 
 
 
351
  }
352
- except HTTPException:
353
- raise
354
- except Exception as e:
355
- logger.error(f"Error talking to monster: {e}")
356
- raise HTTPException(status_code=500, detail=str(e))
357
 
358
- @app.post("/api/monsters/{monster_id}/generate-3d")
359
- async def generate_3d_model(monster_id: str, request: Generate3DRequest):
360
- """Trigger 3D model generation for the monster"""
361
- try:
362
- if monster_id not in app_state.monsters:
363
- raise HTTPException(status_code=404, detail="Monster not found")
364
-
365
- monster = app_state.monsters[monster_id]
366
-
367
- # Generate description if not provided
368
- if not request.description:
369
- description = f"A {monster.personality.value} {monster.stage.value} digital monster"
370
- else:
371
- description = request.description
372
-
373
- # Generate 3D model
374
- logger.info(f"Generating 3D model for {monster.name}: {description}")
375
- model_path = await app_state.pipeline_3d.generate_3d_model(
376
- prompt=description,
377
- output_path=f"data/models/{monster_id}/model.glb"
 
 
378
  )
379
-
380
- # Update monster with model URL
381
- monster.model_url = f"/models/{monster_id}/{Path(model_path).name}"
382
- await app_state.state_manager.save_monster(monster)
383
-
384
- # Send update via WebSocket
385
- await manager.send_update(monster_id, {
386
- "type": "model_update",
387
- "model_url": monster.model_url
388
- })
389
-
390
- return {
391
- "success": True,
392
- "model_url": monster.model_url
393
- }
394
- except HTTPException:
395
- raise
396
- except Exception as e:
397
- logger.error(f"Error generating 3D model: {e}")
398
- raise HTTPException(status_code=500, detail=str(e))
399
 
400
- @app.websocket("/api/monsters/{monster_id}/ws")
401
- async def websocket_endpoint(websocket: WebSocket, monster_id: str):
402
- """WebSocket endpoint for real-time updates"""
403
- await manager.connect(websocket, monster_id)
404
 
405
- try:
406
- # Send initial stats
407
- if monster_id in app_state.monsters:
408
- monster = app_state.monsters[monster_id]
409
- await websocket.send_json({
410
- "type": "initial_state",
411
- "stats": monster.get_stats(),
412
- "stage": monster.stage.value,
413
- "model_url": monster.model_url
414
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
415
 
416
- # Keep connection alive and handle stat degradation
417
- while True:
418
- await asyncio.sleep(30) # Update every 30 seconds
419
-
420
- if monster_id in app_state.monsters:
421
- monster = app_state.monsters[monster_id]
422
- monster.update_time_based_stats()
 
 
 
 
 
 
 
 
 
423
 
424
- await websocket.send_json({
425
- "type": "stats_update",
426
- "stats": monster.get_stats(),
427
- "stage": monster.stage.value
428
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
429
 
430
- except WebSocketDisconnect:
431
- manager.disconnect(monster_id)
432
-
433
- # Streamlit interface runs separately
434
- # Use: streamlit run src/ui/streamlit_interface.py
435
-
436
- # Main entry point
437
- if __name__ == "__main__":
438
- # Create necessary directories
439
- os.makedirs("data/saves", exist_ok=True)
440
- os.makedirs("data/models", exist_ok=True)
441
- os.makedirs("data/cache", exist_ok=True)
442
- os.makedirs("logs", exist_ok=True)
443
-
444
- # Log startup info
445
- logger.info("=" * 60)
446
- logger.info("DigiPal - Advanced AI Monster Companion")
447
- logger.info("=" * 60)
448
- logger.info(f"Environment: {'HuggingFace Spaces' if IS_SPACES else 'Local'}")
449
- logger.info(f"FastAPI Backend Port: {ENV_CONFIG['API_PORT']}")
450
- logger.info(f"Streamlit UI: Run separately on port {ENV_CONFIG['STREAMLIT_PORT']}")
451
- logger.info(f"MCP Enabled: {bool(ENV_CONFIG['MCP_ENDPOINT'])}")
452
- logger.info("=" * 60)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
453
 
454
- # Start FastAPI server only
455
- # Streamlit interface runs separately via: streamlit run src/ui/streamlit_interface.py
456
- logger.info("Starting FastAPI backend server...")
457
- logger.info(f"Streamlit UI: Run 'streamlit run src/ui/streamlit_interface.py' in another terminal")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
458
 
459
- config = uvicorn.Config(
460
- app,
461
- host=ENV_CONFIG["SERVER_NAME"],
462
- port=ENV_CONFIG["API_PORT"],
463
- log_level=ENV_CONFIG["LOG_LEVEL"].lower()
 
 
 
 
 
 
 
464
  )
465
- server = uvicorn.Server(config)
466
 
467
- # Run FastAPI server
468
- asyncio.run(server.serve())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import gradio as gr
+ import spaces
  import os
+ import json
  import torch
+ import gc
+ from datetime import datetime
+ from pathlib import Path
+ 
+ # Initialize directories
+ DATA_DIR = Path("/data") if os.path.exists("/data") else Path("./data")
+ DATA_DIR.mkdir(exist_ok=True)
+ (DATA_DIR / "users").mkdir(exist_ok=True)
+ (DATA_DIR / "monsters").mkdir(exist_ok=True)
+ (DATA_DIR / "models").mkdir(exist_ok=True)
+ (DATA_DIR / "cache").mkdir(exist_ok=True)
+ 
+ # Import modules (to be created)
+ from core.ai_pipeline import MonsterGenerationPipeline
+ from core.game_mechanics import GameMechanics
+ from core.state_manager import StateManager
+ from core.auth_manager import AuthManager
+ from ui.themes import get_cyberpunk_theme, CYBERPUNK_CSS
+ from ui.interfaces import create_voice_interface, create_visual_interface
+ 
+ # Initialize with GPU optimization
+ @spaces.GPU(duration=300)
+ def initialize_systems():
+     """Initialize all core systems with GPU"""
+     pipeline = MonsterGenerationPipeline()
+     return pipeline
+ 
+ # Initialize core systems
  try:
+     pipeline = initialize_systems()
+ except Exception as e:
+     print(f"GPU initialization failed, falling back to CPU: {e}")
+     pipeline = MonsterGenerationPipeline(device="cpu")
+ 
+ game_mechanics = GameMechanics()
+ state_manager = StateManager(DATA_DIR)
+ auth_manager = AuthManager()
+ 
+ # Main generation function
+ @spaces.GPU(duration=180)
+ def generate_monster(oauth_profile, audio_input=None, text_input=None, reference_images=None,
+                      training_focus="balanced", care_level="normal"):
+     """Generate a new monster with AI pipeline"""
+ 
+     if oauth_profile is None:
          return {
+             "message": "🔒 Please log in to create monsters!",
+             "image": None,
+             "model_3d": None,
+             "stats": None,
+             "dialogue": None
          }
+ 
+     user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+ 
      try:
+         # Generate monster using AI pipeline
+         result = pipeline.generate_monster(
+             audio_input=audio_input,
+             text_input=text_input,
+             reference_images=reference_images,
+             user_id=user_id
+         )
 
+         # Create game monster from AI result
+         monster = game_mechanics.create_monster(result, {
+             "training_focus": training_focus,
+             "care_level": care_level
          })
 
+         # Save to persistent storage
+         state_manager.save_monster(user_id, monster)
+ 
+         # Prepare response
          return {
+             "message": f"✨ {monster.name} has been created!",
+             "image": result.get('image'),
+             "model_3d": result.get('model_3d'),
+             "stats": monster.get_stats_display(),
+             "dialogue": result.get('dialogue', "🤖💚1️⃣0️⃣0️⃣")
          }
 
+     except Exception as e:
+         print(f"Error generating monster: {str(e)}")
+         # Use fallback generation
+         fallback_result = pipeline.fallback_generation(text_input or "friendly digital creature")
          return {
+             "message": "⚡ Created using quick generation mode",
+             "image": fallback_result.get('image'),
+             "model_3d": None,
+             "stats": fallback_result.get('stats'),
+             "dialogue": "🤖❓9️⃣9️⃣"
          }
 
+ # Training function
+ def train_monster(oauth_profile, training_type, intensity):
+     """Train the active monster"""
+ 
+     if oauth_profile is None:
+         return "🔒 Please log in to train monsters!", None, None
+ 
+     user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+     current_monster = state_manager.get_current_monster(user_id)
+ 
+     if not current_monster:
+         return "No active monster to train!", None, None
+ 
+     # Apply training
+     result = game_mechanics.train_monster(current_monster, training_type, intensity)
+ 
+     if result['success']:
+         state_manager.update_monster(user_id, current_monster)
+         return (
+             result['message'],
+             current_monster.get_stats_display(),
+             result.get('evolution_check')
          )
+     else:
+         return result['message'], None, None
 
+ # Care functions
+ def feed_monster(oauth_profile, food_type):
+     """Feed the active monster"""
 
+     if oauth_profile is None:
+         return "🔒 Please log in to care for monsters!"
+ 
+     user_id = oauth_profile.username if hasattr(oauth_profile, 'username') else str(oauth_profile)
+     current_monster = state_manager.get_current_monster(user_id)
+ 
+     if not current_monster:
+         return "No active monster to feed!"
+ 
+     result = game_mechanics.feed_monster(current_monster, food_type)
+     state_manager.update_monster(user_id, current_monster)
+ 
+     return result['message']
+ 
+ # Build the Gradio interface
+ with gr.Blocks(
+     theme=get_cyberpunk_theme(),
+     css=CYBERPUNK_CSS,
+     title="DigiPal - Digital Monster Companion"
+ ) as demo:
+ 
+     # Header with cyberpunk styling
+     gr.HTML("""
+         <div class="cyber-header">
+             <h1 class="glitch-text">🤖 DigiPal 🤖</h1>
+             <p class="cyber-subtitle">Your AI-Powered Digital Monster Companion</p>
+             <div class="pulse-line"></div>
+         </div>
+     """)
+ 
+     # Authentication
+     with gr.Row():
+         login_btn = gr.LoginButton("🔐 Connect to Digital World", size="lg")
+         logout_btn = gr.LogoutButton("🔌 Disconnect", size="sm")
+         user_display = gr.Markdown("", elem_classes=["user-status"])
+ 
+     # Main interface tabs
+     with gr.Tabs(elem_classes=["cyber-tabs"]):
+ 
+         # Monster Creation Tab
+         with gr.TabItem("🧬 Create Monster", elem_classes=["cyber-tab-content"]):
+             with gr.Row():
+                 # Input Column
+                 with gr.Column(scale=1):
+                     gr.Markdown("### 🎙️ Voice Input")
+                     audio_input = gr.Audio(
+                         label="Describe your monster",
+                         sources=["microphone", "upload"],
+                         type="filepath",
+                         elem_classes=["cyber-input"]
+                     )
+ 
+                     gr.Markdown("### 💬 Text Input")
+                     text_input = gr.Textbox(
+                         label="Or type a description",
+                         placeholder="Describe your ideal digital monster...",
+                         lines=3,
+                         elem_classes=["cyber-input"]
+                     )
+ 
+                     gr.Markdown("### 🖼️ Reference Images")
+                     reference_images = gr.File(
+                         label="Upload reference images (optional)",
+                         file_count="multiple",
+                         file_types=["image"],
+                         elem_classes=["cyber-input"]
+                     )
+ 
+                     with gr.Row():
+                         training_focus = gr.Radio(
+                             choices=["balanced", "strength", "defense", "speed", "intelligence"],
+                             label="Training Focus",
+                             value="balanced",
+                             elem_classes=["cyber-radio"]
+                         )
+ 
+                     generate_btn = gr.Button(
+                         "⚡ Generate Monster",
+                         variant="primary",
+                         size="lg",
+                         elem_classes=["cyber-button", "generate-button"]
+                     )
+ 
+                 # Output Column
+                 with gr.Column(scale=1):
+                     generation_message = gr.Markdown("", elem_classes=["cyber-message"])
+ 
+                     monster_image = gr.Image(
+                         label="Monster Appearance",
+                         type="pil",
+                         elem_classes=["monster-display"]
+                     )
+ 
+                     monster_model = gr.Model3D(
+                         label="3D Model",
+                         height=400,
+                         elem_classes=["monster-display"]
+                     )
+ 
+                     monster_dialogue = gr.Textbox(
+                         label="Monster Says",
+                         interactive=False,
+                         elem_classes=["cyber-dialogue"]
+                     )
+ 
+                     monster_stats = gr.JSON(
+                         label="Stats",
+                         elem_classes=["cyber-stats"]
+                     )
 
+         # Monster Status Tab
+         with gr.TabItem("📊 Monster Status", elem_classes=["cyber-tab-content"]):
+             with gr.Row():
+                 with gr.Column():
+                     current_monster_display = gr.Model3D(
+                         label="Your Digital Monster",
+                         height=400,
+                         elem_classes=["monster-display"]
+                     )
+ 
+                     monster_communication = gr.Textbox(
+                         label="Monster Communication",
+                         placeholder="Your monster speaks in emojis and numbers...",
+                         interactive=False,
+                         elem_classes=["cyber-dialogue"]
+                     )
 
+                 with gr.Column():
+                     stats_display = gr.JSON(
+                         label="Current Stats",
+                         elem_classes=["cyber-stats"]
+                     )
+ 
+                     care_metrics = gr.JSON(
+                         label="Care Status",
+                         elem_classes=["cyber-stats"]
+                     )
+ 
+                     evolution_progress = gr.HTML(
+                         elem_classes=["evolution-display"]
+                     )
+ 
+                     refresh_btn = gr.Button(
+                         "🔄 Refresh Status",
+                         elem_classes=["cyber-button"]
+                     )
+ 
+         # Training Tab
+         with gr.TabItem("💪 Training", elem_classes=["cyber-tab-content"]):
+             with gr.Row():
+                 with gr.Column():
+                     training_type = gr.Radio(
+                         choices=["Strength", "Defense", "Speed", "Intelligence", "Special"],
+                         label="Training Type",
+                         value="Strength",
+                         elem_classes=["cyber-radio"]
+                     )
+ 
+                     training_intensity = gr.Slider(
+                         minimum=1,
+                         maximum=10,
+                         value=5,
+                         step=1,
+                         label="Training Intensity",
+                         elem_classes=["cyber-slider"]
+                     )
+ 
+                     train_btn = gr.Button(
+                         "🏋️ Start Training",
+                         variant="primary",
+                         elem_classes=["cyber-button"]
+                     )
 
+                 with gr.Column():
+                     training_result = gr.Textbox(
+                         label="Training Result",
+                         interactive=False,
+                         elem_classes=["cyber-output"]
+                     )
+ 
+                     updated_stats = gr.JSON(
+                         label="Updated Stats",
+                         elem_classes=["cyber-stats"]
+                     )
+ 
+                     evolution_check = gr.HTML(
+                         elem_classes=["evolution-display"]
+                     )
+ 
+         # Care Tab
+         with gr.TabItem("❤️ Care", elem_classes=["cyber-tab-content"]):
+             with gr.Row():
+                 with gr.Column():
+                     gr.Markdown("### 🍖 Feeding")
+                     food_type = gr.Radio(
+                         choices=["Meat", "Fish", "Vegetable", "Treat", "Medicine"],
+                         label="Select Food",
+                         value="Meat",
+                         elem_classes=["cyber-radio"]
+                     )
+ 
+                     feed_btn = gr.Button(
+                         "🍽️ Feed Monster",
+                         elem_classes=["cyber-button"]
+                     )
+ 
+                     feeding_result = gr.Textbox(
+                         label="Feeding Result",
+                         interactive=False,
+                         elem_classes=["cyber-output"]
+                     )
+ 
+                 with gr.Column():
+                     gr.Markdown("### 🎮 Interaction")
+ 
+                     play_btn = gr.Button(
+                         "🎾 Play",
+                         elem_classes=["cyber-button"]
+                     )
+ 
+                     praise_btn = gr.Button(
+                         "👏 Praise",
+                         elem_classes=["cyber-button"]
+                     )
+ 
+                     scold_btn = gr.Button(
+                         "👎 Scold",
+                         elem_classes=["cyber-button"]
+                     )
+ 
+                     interaction_result = gr.Textbox(
+                         label="Monster Response",
+                         interactive=False,
+                         elem_classes=["cyber-output"]
+                     )
 
+     # Event handlers
+     generate_btn.click(
+         fn=generate_monster,
+         inputs=[
+             gr.State(lambda: gr.Request().username if hasattr(gr.Request(), 'username') else None),
+             audio_input,
+             text_input,
+             reference_images,
+             training_focus,
+             gr.State("normal")  # care_level
+         ],
+         outputs=[
+             generation_message,
+             monster_image,
+             monster_model,
+             monster_stats,
+             monster_dialogue
+         ]
+     )
 
+     train_btn.click(
+         fn=train_monster,
+         inputs=[
+             gr.State(lambda: gr.Request().username if hasattr(gr.Request(), 'username') else None),
+             training_type,
+             training_intensity
+         ],
+         outputs=[
+             training_result,
+             updated_stats,
+             evolution_check
+         ]
      )
 
+     feed_btn.click(
+         fn=feed_monster,
+         inputs=[
+             gr.State(lambda: gr.Request().username if hasattr(gr.Request(), 'username') else None),
+             food_type
+         ],
+         outputs=[feeding_result]
+     )
+ 
+ # Launch the app
+ if __name__ == "__main__":
+     demo.queue(
+         default_concurrency_limit=10,
+         max_size=100
+     ).launch(
+         server_name="0.0.0.0",
+         server_port=7860,
+         show_api=False,
+         show_error=True
+     )
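Editor's note on the event wiring above: `gr.State(lambda: gr.Request()...)` will not resolve to the logged-in user at event time, because a freshly constructed `gr.Request()` carries no session. On Spaces with OAuth enabled, Gradio instead injects the profile when a handler declares a parameter annotated `gr.OAuthProfile | None` alongside a `gr.LoginButton`; that parameter is not listed in `inputs`. A minimal sketch of this pattern (handler name and labels are illustrative, not from this commit):

```python
import gradio as gr

def whoami(prompt: str, profile: gr.OAuthProfile | None) -> str:
    # Gradio fills `profile` automatically for logged-in users on Spaces;
    # it must NOT appear in the `inputs` list below.
    if profile is None:
        return "🔒 Please log in to create monsters!"
    return f"Generating '{prompt}' for {profile.username}..."

with gr.Blocks() as oauth_sketch:
    gr.LoginButton()
    prompt = gr.Textbox(label="Describe your monster")
    result = gr.Textbox(label="Result")
    gr.Button("Generate").click(whoami, inputs=[prompt], outputs=[result])
```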
{src/core → core}/__init__.py RENAMED
File without changes
core/ai_pipeline.py ADDED
@@ -0,0 +1,309 @@
+ import spaces
+ import torch
+ import gc
+ import os
+ from typing import Optional, List, Dict, Any
+ from datetime import datetime
+ from pathlib import Path
+ import numpy as np
+ from PIL import Image
+ import tempfile
+ 
+ # Model imports (to be implemented)
+ from models.stt_processor import KyutaiSTTProcessor
+ from models.text_generator import QwenTextGenerator
+ from models.image_generator import OmniGenImageGenerator
+ from models.model_3d_generator import Hunyuan3DGenerator
+ from models.rigging_processor import UniRigProcessor
+ from utils.fallbacks import FallbackManager
+ from utils.caching import ModelCache
+ 
+ class MonsterGenerationPipeline:
+     """Main AI pipeline for monster generation"""
+ 
+     def __init__(self, device: str = "cuda"):
+         self.device = device if torch.cuda.is_available() else "cpu"
+         self.cache = ModelCache()
+         self.fallback_manager = FallbackManager()
+         self.models = {}
+         self.model_loaded = {
+             'stt': False,
+             'text_gen': False,
+             'image_gen': False,
+             '3d_gen': False,
+             'rigging': False
+         }
+ 
+         # Pipeline configuration
+         self.config = {
+             'max_retries': 3,
+             'timeout': 180,
+             'enable_caching': True,
+             'low_vram_mode': True
+         }
+ 
+     def _cleanup_memory(self):
+         """Clear GPU memory"""
+         if self.device == "cuda":
+             torch.cuda.empty_cache()
+             torch.cuda.synchronize()
+         gc.collect()
+ 
+     def _lazy_load_model(self, model_type: str):
+         """Lazy loading with memory optimization"""
+         if self.model_loaded[model_type]:
+             return self.models[model_type]
+ 
+         # Clear memory before loading new model
+         self._cleanup_memory()
+ 
+         try:
+             if model_type == 'stt':
+                 self.models['stt'] = KyutaiSTTProcessor(device=self.device)
+             elif model_type == 'text_gen':
+                 self.models['text_gen'] = QwenTextGenerator(device=self.device)
+             elif model_type == 'image_gen':
+                 self.models['image_gen'] = OmniGenImageGenerator(device=self.device)
+             elif model_type == '3d_gen':
+                 self.models['3d_gen'] = Hunyuan3DGenerator(device=self.device)
+             elif model_type == 'rigging':
+                 self.models['rigging'] = UniRigProcessor(device=self.device)
+ 
+             self.model_loaded[model_type] = True
+             return self.models[model_type]
+ 
+         except Exception as e:
+             print(f"Failed to load {model_type}: {e}")
+             return None
+ 
+     def _unload_model(self, model_type: str):
+         """Unload model to free memory"""
+         if model_type in self.models and self.model_loaded[model_type]:
+             if hasattr(self.models[model_type], 'to'):
+                 self.models[model_type].to('cpu')
+             del self.models[model_type]
+             self.model_loaded[model_type] = False
+             self._cleanup_memory()
+ 
+     @spaces.GPU(duration=300)
+     def generate_monster(self,
+                          audio_input: Optional[str] = None,
+                          text_input: Optional[str] = None,
+                          reference_images: Optional[List] = None,
+                          user_id: str = None) -> Dict[str, Any]:
+         """Main monster generation pipeline"""
+ 
+         generation_log = {
+             'user_id': user_id,
+             'timestamp': datetime.now().isoformat(),
+             'stages_completed': [],
+             'fallbacks_used': [],
+             'success': False
+         }
+ 
+         try:
+             # Stage 1: Speech to Text (if audio provided)
+             description = ""
+             if audio_input and os.path.exists(audio_input):
+                 try:
+                     stt_model = self._lazy_load_model('stt')
+                     if stt_model:
+                         description = stt_model.transcribe(audio_input)
+                         generation_log['stages_completed'].append('stt')
+                     else:
+                         raise Exception("STT model failed to load")
+                 except Exception as e:
+                     print(f"STT failed: {e}")
+                     description = text_input or "Create a friendly digital monster"
+                     generation_log['fallbacks_used'].append('stt')
+                 finally:
+                     # Unload STT to free memory
+                     self._unload_model('stt')
+             else:
+                 description = text_input or "Create a friendly digital monster"
+ 
+             # Stage 2: Generate monster characteristics
+             monster_traits = {}
+             monster_dialogue = ""
+             try:
+                 text_gen = self._lazy_load_model('text_gen')
+                 if text_gen:
+                     monster_traits = text_gen.generate_traits(description)
+                     monster_dialogue = text_gen.generate_dialogue(monster_traits)
+                     generation_log['stages_completed'].append('text_gen')
+                 else:
+                     raise Exception("Text generation model failed to load")
+             except Exception as e:
+                 print(f"Text generation failed: {e}")
+                 monster_traits, monster_dialogue = self.fallback_manager.handle_text_gen_failure(description)
+                 generation_log['fallbacks_used'].append('text_gen')
+             finally:
+                 self._unload_model('text_gen')
+ 
+             # Stage 3: Generate monster image
+             monster_image = None
+             try:
+                 image_gen = self._lazy_load_model('image_gen')
+                 if image_gen:
+                     # Create enhanced prompt from traits
+                     image_prompt = self._create_image_prompt(description, monster_traits)
+                     monster_image = image_gen.generate(
+                         prompt=image_prompt,
+                         reference_images=reference_images,
+                         width=512,
+                         height=512
+                     )
+                     generation_log['stages_completed'].append('image_gen')
+                 else:
+                     raise Exception("Image generation model failed to load")
+             except Exception as e:
+                 print(f"Image generation failed: {e}")
+                 monster_image = self.fallback_manager.handle_image_gen_failure(description)
+                 generation_log['fallbacks_used'].append('image_gen')
+             finally:
+                 self._unload_model('image_gen')
+ 
+             # Stage 4: Convert to 3D model
+             model_3d = None
+             model_3d_path = None
+             try:
+                 model_3d_gen = self._lazy_load_model('3d_gen')
+                 if model_3d_gen and monster_image:
+                     model_3d = model_3d_gen.image_to_3d(monster_image)
+                     # Save 3D model
+                     model_3d_path = self._save_3d_model(model_3d, user_id)
+                     generation_log['stages_completed'].append('3d_gen')
+                 else:
+                     raise Exception("3D generation failed")
+             except Exception as e:
+                 print(f"3D generation failed: {e}")
+                 model_3d = self.fallback_manager.handle_3d_gen_failure(monster_image)
+                 generation_log['fallbacks_used'].append('3d_gen')
+             finally:
+                 self._unload_model('3d_gen')
+ 
+             # Stage 5: Add rigging (optional, can be skipped for performance)
+             rigged_model = model_3d
+             if model_3d and self.config.get('enable_rigging', False):
+                 try:
+                     rigging_proc = self._lazy_load_model('rigging')
+                     if rigging_proc:
+                         rigged_model = rigging_proc.rig_mesh(model_3d)
+                         generation_log['stages_completed'].append('rigging')
+                 except Exception as e:
+                     print(f"Rigging failed: {e}")
+                     generation_log['fallbacks_used'].append('rigging')
+                 finally:
+                     self._unload_model('rigging')
+ 
+             # Prepare download files
+             download_files = self._prepare_download_files(
+                 rigged_model or model_3d,
+                 monster_image,
+                 user_id
+             )
+ 
+             generation_log['success'] = True
+ 
+             return {
+                 'description': description,
+                 'traits': monster_traits,
+                 'dialogue': monster_dialogue,
+                 'image': monster_image,
+                 'model_3d': model_3d_path,
+                 'download_files': download_files,
+                 'generation_log': generation_log,
+                 'status': 'success'
+             }
+ 
+         except Exception as e:
+             generation_log['error'] = str(e)
+             print(f"Pipeline error: {e}")
+             return self.fallback_generation(description or "digital monster", generation_log)
+ 
+     def _create_image_prompt(self, base_description: str, traits: Dict) -> str:
+         """Create enhanced prompt for image generation"""
+         prompt_parts = [base_description]
+ 
+         if traits:
+             if 'appearance' in traits:
+                 prompt_parts.append(traits['appearance'])
+             if 'personality' in traits:
+                 prompt_parts.append(f"with {traits['personality']} personality")
+             if 'color_scheme' in traits:
+                 prompt_parts.append(f"featuring {traits['color_scheme']} colors")
+ 
+         prompt_parts.extend([
+             "digital monster",
+             "creature design",
+             "game character",
+             "high quality",
+             "detailed"
+         ])
+ 
+         return ", ".join(prompt_parts)
+ 
+     def _save_3d_model(self, model_3d, user_id: str) -> str:
+         """Save 3D model to persistent storage"""
+         if not model_3d:
+             return None
+ 
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         filename = f"monster_{user_id}_{timestamp}.glb"
+ 
+         # Use HuggingFace Spaces persistent storage
+         if os.path.exists("/data"):
+             filepath = f"/data/models/{filename}"
+         else:
+             filepath = f"./data/models/{filename}"
+ 
+         os.makedirs(os.path.dirname(filepath), exist_ok=True)
+ 
+         # Save model (implementation depends on model format)
+         # This is a placeholder - actual implementation would depend on model format
+         with open(filepath, 'wb') as f:
+             if hasattr(model_3d, 'export'):
+                 model_3d.export(f)
+             else:
+                 # Fallback: save as binary data
+                 f.write(str(model_3d).encode())
+ 
+         return filepath
+ 
+     def _prepare_download_files(self, model_3d, image, user_id: str) -> List[str]:
+         """Prepare downloadable files for user"""
+         files = []
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ 
+         # Save image
+         if image:
+             if isinstance(image, Image.Image):
+                 image_path = f"/tmp/monster_{user_id}_{timestamp}.png"
+                 image.save(image_path)
+                 files.append(image_path)
+             elif isinstance(image, np.ndarray):
+                 image_path = f"/tmp/monster_{user_id}_{timestamp}.png"
+                 Image.fromarray(image).save(image_path)
+                 files.append(image_path)
+ 
+         # Save 3D model in multiple formats if available
+         if model_3d:
+             # GLB format
+             glb_path = f"/tmp/monster_{user_id}_{timestamp}.glb"
+             files.append(glb_path)
+ 
+             # OBJ format (optional)
+             obj_path = f"/tmp/monster_{user_id}_{timestamp}.obj"
+             files.append(obj_path)
+ 
+         return files
+ 
+     def fallback_generation(self, description: str, generation_log: Dict) -> Dict[str, Any]:
+         """Complete fallback generation when pipeline fails"""
+         return self.fallback_manager.complete_fallback_generation(description, generation_log)
+ 
+     def cleanup(self):
+         """Clean up all loaded models"""
+         for model_type in list(self.models.keys()):
+             self._unload_model(model_type)
+         self._cleanup_memory()
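Taken together, the pipeline is meant to be driven as load → run stage → unload, with `cleanup()` as a final safety net. A minimal usage sketch (the prompt and user id are placeholder values; `generate_monster` and `cleanup` are the methods defined above):

```python
from core.ai_pipeline import MonsterGenerationPipeline

pipeline = MonsterGenerationPipeline(device="cuda")
try:
    result = pipeline.generate_monster(
        text_input="a small fire-element guardian",  # illustrative prompt
        user_id="demo_user",                         # illustrative id
    )
    if result["status"] == "success":
        print(result["traits"], result["model_3d"])
        # The log records which stages ran and where fallbacks kicked in.
        print("fallbacks used:", result["generation_log"]["fallbacks_used"])
finally:
    # Each stage unloads its own model in a `finally`; cleanup() drops
    # anything still resident and clears the CUDA cache.
    pipeline.cleanup()
```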
core/auth_manager.py ADDED
@@ -0,0 +1,101 @@
+ from typing import Optional, Dict, Any
+ from datetime import datetime, timedelta
+ import secrets
+ import json
+ from pathlib import Path
+ 
+ class AuthManager:
+     """Manages authentication for HuggingFace Spaces OAuth"""
+ 
+     def __init__(self):
+         # OAuth scopes for HuggingFace Spaces
+         self.oauth_scopes = [
+             "read-repos",
+             "write-repos"
+         ]
+ 
+         # Session management
+         self.sessions = {}
+         self.session_timeout = timedelta(hours=8)
+ 
+     def get_oauth_config(self) -> Dict[str, Any]:
+         """Get OAuth configuration for HuggingFace Spaces"""
+         return {
+             "provider": "huggingface",
+             "scopes": self.oauth_scopes,
+             "expiration_minutes": 480,  # 8 hours
+             "allow_anonymous": False
+         }
+ 
+     def validate_session(self, session_token: str) -> Optional[Dict[str, Any]]:
+         """Validate a session token"""
+         if session_token in self.sessions:
+             session = self.sessions[session_token]
+             if datetime.now() < session['expires']:
+                 # Update last access
+                 session['last_access'] = datetime.now()
+                 return session['user_data']
+         return None
+ 
+     def create_session(self, oauth_profile: Dict[str, Any]) -> str:
+         """Create a new session for authenticated user"""
+         session_token = secrets.token_urlsafe(32)
+ 
+         self.sessions[session_token] = {
+             'user_data': {
+                 'username': oauth_profile.get('preferred_username', oauth_profile.get('username')),
+                 'name': oauth_profile.get('name', 'Anonymous'),
+                 'avatar_url': oauth_profile.get('picture', oauth_profile.get('avatar_url')),
+                 'auth_time': datetime.now().isoformat()
+             },
+             'created': datetime.now(),
+             'expires': datetime.now() + self.session_timeout,
+             'last_access': datetime.now()
+         }
+ 
+         return session_token
+ 
+     def cleanup_expired_sessions(self):
+         """Remove expired sessions"""
+         current_time = datetime.now()
+         expired_tokens = [
+             token for token, session in self.sessions.items()
+             if current_time > session['expires']
+         ]
+ 
+         for token in expired_tokens:
+             del self.sessions[token]
+ 
+     def get_user_permissions(self, username: str) -> Dict[str, bool]:
+         """Get user permissions"""
+         # In HuggingFace Spaces, all authenticated users have same permissions
+         return {
+             'can_create_monster': True,
+             'can_train': True,
+             'can_evolve': True,
+             'can_battle': True,
+             'can_export': True,
+             'max_monsters': 10,
+             'max_daily_generations': 50
+         }
+ 
+     def log_user_action(self, username: str, action: str, details: Dict = None):
+         """Log user actions for analytics"""
+         # This would typically write to a database or analytics service
+         # For HF Spaces, we'll just print for now
+         log_entry = {
+             'timestamp': datetime.now().isoformat(),
+             'username': username,
+             'action': action,
+             'details': details or {}
+         }
+         print(f"User Action: {json.dumps(log_entry)}")
+ 
+     def format_oauth_button_config(self) -> Dict[str, Any]:
+         """Format configuration for Gradio LoginButton"""
+         return {
+             "value": "Connect to Digital World",
+             "size": "lg",
+             "icon": "🔐",
+             "variant": "primary"
+         }
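For reference, the session lifecycle above composes like this; the shape of the profile dict is assumed from the keys `create_session` reads, so the field values are illustrative:

```python
from core.auth_manager import AuthManager

auth = AuthManager()

# Create a session from an OAuth profile dict (fields are placeholders).
token = auth.create_session({
    "preferred_username": "ash",
    "name": "Ash",
    "picture": "https://example.com/avatar.png",
})

user = auth.validate_session(token)        # -> user_data dict while valid
assert user is not None and user["username"] == "ash"

auth.cleanup_expired_sessions()            # periodic housekeeping
print(auth.get_user_permissions("ash")["max_monsters"])  # 10
```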
core/game_mechanics.py ADDED
@@ -0,0 +1,496 @@
+ import json
+ import random
+ from datetime import datetime, timedelta
+ from typing import Dict, List, Any, Optional
+ from dataclasses import dataclass, asdict
+ import numpy as np
+ 
+ @dataclass
+ class Monster:
+     """Monster data class"""
+     name: str
+     species: str
+     stage: str  # rookie, champion, ultimate, mega
+     stats: Dict[str, int]
+     care_state: Dict[str, float]
+     personality: Dict[str, Any]
+     birth_time: datetime
+     evolution_time: Optional[datetime] = None
+     training_count: int = 0
+     battle_count: int = 0
+     happiness_events: List[str] = None
+     image_path: Optional[str] = None
+     model_3d_path: Optional[str] = None
+ 
+     def __post_init__(self):
+         if self.happiness_events is None:
+             self.happiness_events = []
+ 
+     def to_dict(self) -> Dict:
+         """Convert monster to dictionary for storage"""
+         data = asdict(self)
+         data['birth_time'] = self.birth_time.isoformat()
+         if self.evolution_time:
+             data['evolution_time'] = self.evolution_time.isoformat()
+         return data
+ 
+     @classmethod
+     def from_dict(cls, data: Dict) -> 'Monster':
+         """Create monster from dictionary"""
+         data['birth_time'] = datetime.fromisoformat(data['birth_time'])
+         if data.get('evolution_time'):
+             data['evolution_time'] = datetime.fromisoformat(data['evolution_time'])
+         return cls(**data)
+ 
+     def get_stats_display(self) -> Dict[str, Any]:
+         """Get formatted stats for display"""
+         return {
+             "name": self.name,
+             "species": self.species,
+             "stage": self.stage,
+             "level": self._calculate_level(),
+             "stats": {
+                 "HP": f"{self.stats['hp']}/999",
+                 "ATK": f"{self.stats['attack']}/500",
+                 "DEF": f"{self.stats['defense']}/500",
+                 "SPD": f"{self.stats['speed']}/500",
+                 "SPC": f"{self.stats['special']}/500"
+             },
+             "care": {
+                 "Hunger": f"{self.care_state['hunger']:.0f}%",
+                 "Happiness": f"{self.care_state['happiness']:.0f}%",
+                 "Fatigue": f"{self.care_state['fatigue']:.0f}%",
+                 "Health": f"{self.care_state['health']:.0f}%"
+             },
+             "age": self._calculate_age()
+         }
+ 
+     def _calculate_level(self) -> int:
+         """Calculate monster level based on stats and experience"""
+         total_stats = sum(self.stats.values())
+         base_level = total_stats // 50
+         exp_bonus = (self.training_count + self.battle_count) // 10
+         return min(99, base_level + exp_bonus + 1)
+ 
+     def _calculate_age(self) -> str:
+         """Calculate monster age"""
+         age = datetime.now() - self.birth_time
+         if age.days > 0:
+             return f"{age.days} days"
+         elif age.seconds > 3600:
+             return f"{age.seconds // 3600} hours"
+         else:
+             return f"{age.seconds // 60} minutes"
+ 
+ 
+ class GameMechanics:
+     """Core game mechanics inspired by Digimon World 1"""
+ 
+     def __init__(self):
+         # Stat ranges and limits
+         self.stat_limits = {
+             'hp': (10, 999),
+             'attack': (5, 500),
+             'defense': (5, 500),
+             'speed': (5, 500),
+             'special': (5, 500)
+         }
+ 
+         # Care thresholds
+         self.care_thresholds = {
+             'hunger': {'critical': 20, 'low': 40, 'good': 70},
+             'happiness': {'critical': 20, 'low': 40, 'good': 70},
+             'fatigue': {'good': 30, 'tired': 60, 'exhausted': 80},
+             'health': {'critical': 30, 'low': 50, 'good': 80}
+         }
+ 
+         # Training effectiveness modifiers
+         self.training_modifiers = {
+             'strength': {'attack': 1.5, 'defense': 0.8, 'speed': 0.7},
+             'defense': {'attack': 0.7, 'defense': 1.5, 'hp': 1.2},
+             'speed': {'speed': 1.5, 'attack': 0.9, 'special': 0.8},
+             'intelligence': {'special': 1.5, 'defense': 0.9, 'hp': 0.8},
+             'balanced': {'attack': 1.0, 'defense': 1.0, 'speed': 1.0, 'special': 1.0}
+         }
+ 
+         # Evolution requirements (simplified)
+         self.evolution_requirements = {
+             'champion': {
+                 'min_stats': 150,     # Total stats
+                 'min_care': 60,       # Average care percentage
+                 'min_age': 1,         # Days
+                 'training_count': 10
+             },
+             'ultimate': {
+                 'min_stats': 300,
+                 'min_care': 70,
+                 'min_age': 3,
+                 'training_count': 30
+             },
+             'mega': {
+                 'min_stats': 500,
+                 'min_care': 80,
+                 'min_age': 7,
+                 'training_count': 50
+             }
+         }
+ 
+     def create_monster(self, generation_result: Dict[str, Any], user_preferences: Dict = None) -> Monster:
+         """Create a new monster from AI generation results"""
+ 
+         traits = generation_result.get('traits', {})
+         preferences = user_preferences or {}
+ 
+         # Generate base stats based on traits and preferences
+         base_stats = self._generate_base_stats(traits, preferences.get('training_focus', 'balanced'))
+ 
+         # Initialize care state
+         care_state = {
+             'hunger': 80.0,
+             'happiness': 90.0,
+             'fatigue': 10.0,
+             'health': 100.0
+         }
+ 
+         # Determine personality from traits
+         personality = self._determine_personality(traits)
+ 
+         # Create monster name
+         name = traits.get('name', self._generate_name(traits))
+ 
+         # Create monster instance
+         monster = Monster(
+             name=name,
+             species=traits.get('species', 'DigiPal'),
+             stage='rookie',
+             stats=base_stats,
+             care_state=care_state,
+             personality=personality,
+             birth_time=datetime.now(),
+             image_path=generation_result.get('image'),
+             model_3d_path=generation_result.get('model_3d')
+         )
+ 
+         return monster
+ 
+     def _generate_base_stats(self, traits: Dict, focus: str) -> Dict[str, int]:
+         """Generate base stats based on traits and focus"""
+         # Base values
+         base = {
+             'hp': random.randint(50, 100),
+             'attack': random.randint(15, 35),
+             'defense': random.randint(15, 35),
+             'speed': random.randint(15, 35),
+             'special': random.randint(15, 35)
+         }
+ 
+         # Apply focus modifiers
+         if focus in self.training_modifiers:
+             for stat, modifier in self.training_modifiers[focus].items():
+                 if stat in base:
+                     base[stat] = int(base[stat] * modifier)
+ 
+         # Apply trait-based modifiers
+         if traits.get('element') == 'fire':
+             base['attack'] += 10
+             base['special'] += 5
+         elif traits.get('element') == 'water':
+             base['defense'] += 10
+             base['hp'] += 20
+         elif traits.get('element') == 'earth':
+             base['defense'] += 15
+             base['hp'] += 10
+         elif traits.get('element') == 'wind':
+             base['speed'] += 15
+             base['special'] += 5
+ 
+         # Ensure stats are within limits
+         for stat in base:
+             base[stat] = max(self.stat_limits[stat][0],
+                              min(self.stat_limits[stat][1], base[stat]))
+ 
+         return base
+ 
+     def _determine_personality(self, traits: Dict) -> Dict[str, Any]:
+         """Determine monster personality from traits"""
+         personality_traits = [
+             'brave', 'timid', 'aggressive', 'gentle',
+             'playful', 'serious', 'loyal', 'independent'
+         ]
+ 
+         # Select primary trait
+         primary = traits.get('personality', random.choice(personality_traits))
+ 
+         # Generate personality profile
+         return {
+             'primary': primary,
+             'likes': self._generate_likes(primary),
+             'dislikes': self._generate_dislikes(primary),
+             'training_preference': self._get_training_preference(primary),
+             'battle_style': self._get_battle_style(primary)
+         }
+ 
+     def _generate_name(self, traits: Dict) -> str:
+         """Generate a name if not provided"""
+         prefixes = ['Digi', 'Cyber', 'Tech', 'Neo', 'Alpha', 'Beta']
+         suffixes = ['mon', 'pal', 'byte', 'bit', 'tron', 'x']
+ 
+         prefix = random.choice(prefixes)
+         suffix = random.choice(suffixes)
+ 
+         return f"{prefix}{suffix}"
+ 
+     def _generate_likes(self, personality: str) -> List[str]:
+         """Generate things the monster likes based on personality"""
+         likes_map = {
+             'brave': ['battles', 'challenges', 'meat'],
+             'timid': ['quiet places', 'vegetables', 'praise'],
+             'aggressive': ['training', 'meat', 'battles'],
+             'gentle': ['praise', 'vegetables', 'playing'],
+             'playful': ['games', 'treats', 'attention'],
+             'serious': ['training', 'discipline', 'fish'],
+             'loyal': ['praise', 'companionship', 'meat'],
+             'independent': ['exploration', 'variety', 'fish']
+         }
+         return likes_map.get(personality, ['food', 'play', 'rest'])
+ 
+     def _generate_dislikes(self, personality: str) -> List[str]:
+         """Generate things the monster dislikes based on personality"""
+         dislikes_map = {
+             'brave': ['running away', 'vegetables', 'rest'],
+             'timid': ['battles', 'loud noises', 'scolding'],
+             'aggressive': ['vegetables', 'rest', 'gentle training'],
+             'gentle': ['battles', 'scolding', 'meat'],
+             'playful': ['discipline', 'vegetables', 'being ignored'],
+             'serious': ['games', 'treats', 'slacking'],
+             'loyal': ['being alone', 'scolding', 'betrayal'],
+             'independent': ['clingy behavior', 'routine', 'vegetables']
+         }
+         return dislikes_map.get(personality, ['scolding', 'hunger', 'fatigue'])
+ 
+     def _get_training_preference(self, personality: str) -> str:
+         """Get preferred training type based on personality"""
+         preferences = {
+             'brave': 'strength',
+             'timid': 'defense',
+             'aggressive': 'strength',
+             'gentle': 'intelligence',
+             'playful': 'speed',
+             'serious': 'balanced',
+             'loyal': 'defense',
+             'independent': 'speed'
+         }
+         return preferences.get(personality, 'balanced')
+ 
+     def _get_battle_style(self, personality: str) -> str:
+         """Get battle style based on personality"""
+         styles = {
+             'brave': 'offensive',
+             'timid': 'defensive',
+             'aggressive': 'berserker',
+             'gentle': 'support',
+             'playful': 'trickster',
+             'serious': 'tactical',
+             'loyal': 'guardian',
+             'independent': 'adaptive'
+         }
+         return styles.get(personality, 'balanced')
+ 
+     def train_monster(self, monster: Monster, training_type: str, intensity: int) -> Dict[str, Any]:
+         """Train the monster to improve stats"""
+ 
+         # Check if monster can train
+         if monster.care_state['fatigue'] > self.care_thresholds['fatigue']['exhausted']:
+             return {
+                 'success': False,
+                 'message': f"{monster.name} is too tired to train! 😴💤",
+                 'stat_changes': {}
+             }
+ 
+         if monster.care_state['hunger'] < self.care_thresholds['hunger']['low']:
+             return {
+                 'success': False,
+                 'message': f"{monster.name} is too hungry to train! 🍖❓",
+                 'stat_changes': {}
+             }
+ 
+         # Calculate stat gains
+         base_gain = intensity * 2
+         stat_gains = {}
+ 
+         # Apply training type modifiers
+         training_type = training_type.lower()
+         if training_type in self.training_modifiers:
+             for stat, modifier in self.training_modifiers[training_type].items():
+                 if stat in monster.stats:
+                     gain = int(base_gain * modifier * random.uniform(0.8, 1.2))
+ 
+                     # Personality bonus
+                     if training_type == monster.personality['training_preference']:
+                         gain = int(gain * 1.2)
+ 
+                     # Apply gain with stat limits
+                     old_value = monster.stats[stat]
+                     new_value = min(self.stat_limits[stat][1], old_value + gain)
+                     actual_gain = new_value - old_value
+ 
+                     if actual_gain > 0:
+                         monster.stats[stat] = new_value
+                         stat_gains[stat] = actual_gain
+ 
+         # Update care state
+         fatigue_gain = intensity * 5 + random.randint(0, 10)
+         happiness_gain = 5 if training_type == monster.personality['training_preference'] else 2
+ 
+         monster.care_state['fatigue'] = min(100, monster.care_state['fatigue'] + fatigue_gain)
+         monster.care_state['happiness'] = min(100, monster.care_state['happiness'] + happiness_gain)
+         monster.care_state['hunger'] = max(0, monster.care_state['hunger'] - intensity * 2)
+ 
+         # Update training count
+         monster.training_count += 1
+ 
+         # Check for evolution
+         evolution_check = self.check_evolution(monster)
+ 
+         # Generate response message
+         if stat_gains:
+             gains_text = ", ".join([f"{stat.upper()} +{gain}" for stat, gain in stat_gains.items()])
+             message = f"💪 Training complete! {gains_text}"
+         else:
+             message = f"📈 {monster.name} has reached stat limits in this area!"
+ 
+         return {
+             'success': True,
+             'message': message,
+             'stat_changes': stat_gains,
+             'fatigue_gained': fatigue_gain,
+             'evolution_check': evolution_check
+         }
+ 
+     def check_evolution(self, monster: Monster) -> Optional[Dict[str, Any]]:
+         """Check if monster meets evolution requirements"""
+ 
+         current_stage = monster.stage
+         next_stage = None
+ 
+         if current_stage == 'rookie':
+             next_stage = 'champion'
+         elif current_stage == 'champion':
+             next_stage = 'ultimate'
+         elif current_stage == 'ultimate':
+             next_stage = 'mega'
+         else:
+             return None
+ 
+         requirements = self.evolution_requirements.get(next_stage)
+         if not requirements:
+             return None
+ 
+         # Check requirements
+         total_stats = sum(monster.stats.values())
+         avg_care = sum(monster.care_state.values()) / len(monster.care_state)
+         age_days = (datetime.now() - monster.birth_time).days
+ 
+         meets_requirements = (
+             total_stats >= requirements['min_stats'] and
+             avg_care >= requirements['min_care'] and
+             age_days >= requirements['min_age'] and
+             monster.training_count >= requirements['training_count']
+         )
+ 
+         if meets_requirements:
+             return {
+                 'can_evolve': True,
+                 'next_stage': next_stage,
+                 'message': f"✨ {monster.name} is ready to evolve to {next_stage}!"
+             }
+         else:
+             return {
+                 'can_evolve': False,
+                 'next_stage': next_stage,
+                 'progress': {
+                     'stats': f"{total_stats}/{requirements['min_stats']}",
+                     'care': f"{avg_care:.0f}%/{requirements['min_care']}%",
+                     'age': f"{age_days}/{requirements['min_age']} days",
+                     'training': f"{monster.training_count}/{requirements['training_count']}"
+                 }
+             }
+ 
+     def feed_monster(self, monster: Monster, food_type: str) -> Dict[str, Any]:
+         """Feed the monster"""
+ 
+         food_effects = {
+             'meat': {'hunger': 40, 'happiness': 10, 'health': 5},
+             'fish': {'hunger': 35, 'happiness': 15, 'health': 10},
+             'vegetable': {'hunger': 30, 'happiness': 5, 'health': 15},
+             'treat': {'hunger': 20, 'happiness': 30, 'health': 0},
+             'medicine': {'hunger': 0, 'happiness': -10, 'health': 50}
+         }
+ 
+         effects = food_effects.get(food_type.lower(), food_effects['meat'])
+ 
+         # Apply personality preferences
+         likes_food = food_type.lower() in [like.lower() for like in monster.personality.get('likes', [])]
+         dislikes_food = food_type.lower() in [dislike.lower() for dislike in monster.personality.get('dislikes', [])]
+ 
+         if likes_food:
+             effects['happiness'] *= 2
+         elif dislikes_food:
+             effects['happiness'] = -abs(effects['happiness'])
+ 
+         # Update care state
+         old_hunger = monster.care_state['hunger']
+         monster.care_state['hunger'] = min(100, monster.care_state['hunger'] + effects['hunger'])
+         monster.care_state['happiness'] = max(0, min(100, monster.care_state['happiness'] + effects['happiness']))
+         monster.care_state['health'] = min(100, monster.care_state['health'] + effects['health'])
+ 
+         # Generate response
+         if likes_food:
+             message = f"😋 {monster.name} loves {food_type}! 💖"
+         elif dislikes_food:
+             message = f"😒 {monster.name} doesn't like {food_type}... 💔"
+         elif old_hunger < 30:
+             message = f"🍽️ {monster.name} was very hungry! Much better now! 😊"
+         else:
+             message = f"🍴 {monster.name} enjoyed the {food_type}! 👍"
+ 
+         return {
+             'success': True,
+             'message': message,
+             'effects': effects,
+             'current_state': monster.care_state
+         }
+ 
+     def update_care_state(self, monster: Monster, time_passed: timedelta) -> Dict[str, Any]:
+         """Update monster care state based on time passed"""
+ 
+         # Calculate hours passed
+         hours = time_passed.total_seconds() / 3600
+ 
+         # Decrease hunger and happiness over time
+         monster.care_state['hunger'] = max(0, monster.care_state['hunger'] - hours * 5)
+         monster.care_state['happiness'] = max(0, monster.care_state['happiness'] - hours * 2)
+ 
+         # Decrease fatigue over time (rest)
+         monster.care_state['fatigue'] = max(0, monster.care_state['fatigue'] - hours * 10)
+ 
+         # Health changes based on other stats
+         if monster.care_state['hunger'] < 20:
+             monster.care_state['health'] = max(0, monster.care_state['health'] - hours * 3)
+         elif monster.care_state['happiness'] < 20:
+             monster.care_state['health'] = max(0, monster.care_state['health'] - hours * 1)
+ 
+         # Check for critical states
+         alerts = []
+         if monster.care_state['hunger'] < self.care_thresholds['hunger']['critical']:
+             alerts.append("🍖 Your monster is starving!")
+         if monster.care_state['happiness'] < self.care_thresholds['happiness']['critical']:
+             alerts.append("😢 Your monster is very unhappy!")
+         if monster.care_state['health'] < self.care_thresholds['health']['critical']:
+             alerts.append("🏥 Your monster needs medical attention!")
+ 
+         return {
+             'updated_state': monster.care_state,
+             'alerts': alerts,
+             'time_since_update': str(time_passed)
+         }
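A quick worked example of the training math above: with intensity 5 the base gain is 5 × 2 = 10; strength training multiplies the attack gain by 1.5, and a matching personality preference adds a further 1.2×, so attack rises by roughly 10 × 1.5 × 1.2 ≈ 18 before the ±20% jitter. Sketch, using only the classes defined above (the generation-result dict is a minimal stand-in for the AI pipeline output):

```python
from core.game_mechanics import GameMechanics

mechanics = GameMechanics()

# Only the 'traits' key is read by create_monster; the values are illustrative.
monster = mechanics.create_monster(
    {"traits": {"name": "Pyro", "element": "fire", "personality": "brave"}},
    {"training_focus": "strength"},
)

# 'brave' prefers strength training, so the 1.2x personality bonus applies.
outcome = mechanics.train_monster(monster, "Strength", intensity=5)
print(outcome["message"], outcome["stat_changes"])
print(outcome["evolution_check"])  # progress toward the 'champion' gate
```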
core/state_manager.py ADDED
@@ -0,0 +1,280 @@
+ import json
+ import os
+ from pathlib import Path
+ from datetime import datetime, timedelta
+ from typing import Dict, List, Optional, Any
+ import shutil
+ from core.game_mechanics import Monster
+ 
+ class StateManager:
+     """Manages persistent state for users and monsters"""
+ 
+     def __init__(self, data_dir: Path):
+         self.data_dir = Path(data_dir)
+         self.users_dir = self.data_dir / "users"
+         self.monsters_dir = self.data_dir / "monsters"
+         self.cache_dir = self.data_dir / "cache"
+ 
+         # Create directories if they don't exist
+         for dir_path in [self.users_dir, self.monsters_dir, self.cache_dir]:
+             dir_path.mkdir(parents=True, exist_ok=True)
+ 
+         # In-memory cache for active sessions
+         self.active_sessions = {}
+         self.last_save_time = {}
+ 
+     def get_user_dir(self, user_id: str) -> Path:
+         """Get or create user directory"""
+         user_dir = self.users_dir / user_id
+         user_dir.mkdir(exist_ok=True)
+         return user_dir
+ 
+     def save_monster(self, user_id: str, monster: Monster) -> bool:
+         """Save monster to persistent storage"""
+         try:
+             user_dir = self.get_user_dir(user_id)
+ 
+             # Save monster data
+             monster_file = user_dir / f"monster_{monster.name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+             with open(monster_file, 'w') as f:
+                 json.dump(monster.to_dict(), f, indent=2)
+ 
+             # Update current monster reference
+             current_file = user_dir / "current_monster.json"
+             current_data = {
+                 'monster_file': str(monster_file.name),
+                 'monster_name': monster.name,
+                 'last_updated': datetime.now().isoformat()
+             }
+             with open(current_file, 'w') as f:
+                 json.dump(current_data, f, indent=2)
+ 
+             # Update user profile
+             self._update_user_profile(user_id, monster)
+ 
+             # Cache in memory
+             self.active_sessions[user_id] = {
+                 'monster': monster,
+                 'last_access': datetime.now()
+             }
+ 
+             return True
+ 
+         except Exception as e:
+             print(f"Error saving monster: {e}")
+             return False
+ 
+     def get_current_monster(self, user_id: str) -> Optional[Monster]:
+         """Get the current active monster for a user"""
+ 
+         # Check memory cache first
+         if user_id in self.active_sessions:
+             session = self.active_sessions[user_id]
+             if datetime.now() - session['last_access'] < timedelta(minutes=30):
+                 session['last_access'] = datetime.now()
+                 return session['monster']
+ 
+         # Load from disk
+         try:
+             user_dir = self.get_user_dir(user_id)
+             current_file = user_dir / "current_monster.json"
+ 
+             if not current_file.exists():
+                 return None
+ 
+             with open(current_file, 'r') as f:
+                 current_data = json.load(f)
+ 
+             monster_file = user_dir / current_data['monster_file']
+             if not monster_file.exists():
+                 return None
+ 
+             with open(monster_file, 'r') as f:
+                 monster_data = json.load(f)
+ 
+             monster = Monster.from_dict(monster_data)
+ 
+             # Update cache
+             self.active_sessions[user_id] = {
+                 'monster': monster,
+                 'last_access': datetime.now()
+             }
+ 
+             return monster
+ 
+         except Exception as e:
+             print(f"Error loading monster: {e}")
+             return None
+ 
+     def update_monster(self, user_id: str, monster: Monster) -> bool:
+         """Update existing monster data"""
+ 
+         # Update in memory
+         if user_id in self.active_sessions:
+             self.active_sessions[user_id]['monster'] = monster
+             self.active_sessions[user_id]['last_access'] = datetime.now()
+ 
+         # Save periodically (every 5 minutes) or if important changes
+         should_save = False
+         current_time = datetime.now()
+ 
+         if user_id not in self.last_save_time:
+             should_save = True
+         else:
+             time_since_save = current_time - self.last_save_time[user_id]
+             if time_since_save > timedelta(minutes=5):
+                 should_save = True
+ 
+         # Always save on evolution or critical states
+         if monster.care_state['health'] < 30 or monster.care_state['hunger'] < 20:
129
+ if monster.care_state['health'] < 30 or monster.care_state['hunger'] < 20:
130
+ should_save = True
131
+
132
+ if should_save:
133
+ self.last_save_time[user_id] = current_time
134
+ return self.save_monster(user_id, monster)
135
+
136
+ return True
137
+
138
+ def get_user_monsters(self, user_id: str) -> List[Dict[str, Any]]:
139
+ """Get all monsters for a user"""
140
+ try:
141
+ user_dir = self.get_user_dir(user_id)
142
+ monsters = []
143
+
144
+ for file_path in user_dir.glob("monster_*.json"):
145
+ if file_path.name != "current_monster.json":
146
+ with open(file_path, 'r') as f:
147
+ monster_data = json.load(f)
148
+ monsters.append({
149
+ 'file': file_path.name,
150
+ 'name': monster_data.get('name'),
151
+ 'species': monster_data.get('species'),
152
+ 'stage': monster_data.get('stage'),
153
+ 'birth_time': monster_data.get('birth_time')
154
+ })
155
+
156
+ # Sort by birth time (newest first)
157
+ monsters.sort(key=lambda x: x['birth_time'], reverse=True)
158
+ return monsters
159
+
160
+ except Exception as e:
161
+ print(f"Error getting user monsters: {e}")
162
+ return []
163
+
164
+ def _update_user_profile(self, user_id: str, monster: Monster):
165
+ """Update user profile with monster statistics"""
166
+ try:
167
+ user_dir = self.get_user_dir(user_id)
168
+ profile_file = user_dir / "profile.json"
169
+
170
+ # Load existing profile or create new
171
+ if profile_file.exists():
172
+ with open(profile_file, 'r') as f:
173
+ profile = json.load(f)
174
+ else:
175
+ profile = {
176
+ 'user_id': user_id,
177
+ 'created': datetime.now().isoformat(),
178
+ 'monsters_created': 0,
179
+ 'total_training_sessions': 0,
180
+ 'achievements': []
181
+ }
182
+
183
+ # Update statistics
184
+ profile['monsters_created'] = profile.get('monsters_created', 0) + 1
185
+ profile['last_active'] = datetime.now().isoformat()
186
+ profile['current_monster'] = monster.name
187
+
188
+ # Check for achievements
189
+ new_achievements = self._check_achievements(profile, monster)
190
+ profile['achievements'].extend(new_achievements)
191
+
192
+ # Save profile
193
+ with open(profile_file, 'w') as f:
194
+ json.dump(profile, f, indent=2)
195
+
196
+ except Exception as e:
197
+ print(f"Error updating user profile: {e}")
198
+
199
+ def _check_achievements(self, profile: Dict, monster: Monster) -> List[Dict[str, Any]]:
200
+ """Check for new achievements"""
201
+ achievements = []
202
+ current_achievements = {a['id'] for a in profile.get('achievements', [])}
203
+
204
+ # First monster achievement
205
+ if profile['monsters_created'] == 1 and 'first_monster' not in current_achievements:
206
+ achievements.append({
207
+ 'id': 'first_monster',
208
+ 'name': 'Digital Pioneer',
209
+ 'description': 'Created your first digital monster',
210
+ 'icon': '🥇',
211
+ 'unlocked': datetime.now().isoformat()
212
+ })
213
+
214
+ # Multiple monsters achievement
215
+ if profile['monsters_created'] == 5 and 'monster_collector' not in current_achievements:
216
+ achievements.append({
217
+ 'id': 'monster_collector',
218
+ 'name': 'Monster Collector',
219
+ 'description': 'Created 5 digital monsters',
220
+ 'icon': '🏆',
221
+ 'unlocked': datetime.now().isoformat()
222
+ })
223
+
224
+ # Perfect care achievement
225
+ if all(monster.care_state[stat] >= 90 for stat in ['hunger', 'happiness', 'health']):
226
+ if 'perfect_care' not in current_achievements:
227
+ achievements.append({
228
+ 'id': 'perfect_care',
229
+ 'name': 'Perfect Caretaker',
230
+ 'description': 'Achieved perfect care status',
231
+ 'icon': '💖',
232
+ 'unlocked': datetime.now().isoformat()
233
+ })
234
+
235
+ return achievements
236
+
237
+ def get_user_profile(self, user_id: str) -> Optional[Dict[str, Any]]:
238
+ """Get user profile"""
239
+ try:
240
+ user_dir = self.get_user_dir(user_id)
241
+ profile_file = user_dir / "profile.json"
242
+
243
+ if profile_file.exists():
244
+ with open(profile_file, 'r') as f:
245
+ return json.load(f)
246
+ return None
247
+
248
+ except Exception as e:
249
+ print(f"Error loading user profile: {e}")
250
+ return None
251
+
252
+ def cleanup_old_sessions(self):
253
+ """Clean up old sessions from memory"""
254
+ current_time = datetime.now()
255
+ expired_users = []
256
+
257
+ for user_id, session in self.active_sessions.items():
258
+ if current_time - session['last_access'] > timedelta(hours=1):
259
+ expired_users.append(user_id)
260
+
261
+ for user_id in expired_users:
262
+ # Save before removing from cache
263
+ if 'monster' in self.active_sessions[user_id]:
264
+ self.save_monster(user_id, self.active_sessions[user_id]['monster'])
265
+ del self.active_sessions[user_id]
266
+
267
+ def export_user_data(self, user_id: str) -> Optional[str]:
268
+ """Export all user data as a zip file"""
269
+ try:
270
+ user_dir = self.get_user_dir(user_id)
271
+ export_path = self.cache_dir / f"export_{user_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
272
+
273
+ # Create zip archive
274
+ shutil.make_archive(str(export_path), 'zip', user_dir)
275
+
276
+ return f"{export_path}.zip"
277
+
278
+ except Exception as e:
279
+ print(f"Error exporting user data: {e}")
280
+ return None
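For reference, a minimal round-trip through the `StateManager` above. The class and method names come from the code; `monster` is assumed to be a `Monster` instance built elsewhere in `core.game_mechanics`.

```python
from pathlib import Path
from core.state_manager import StateManager

manager = StateManager(Path("data"))

# Persist the active monster, then read it back; reads are served from the
# in-memory cache for 30 minutes and fall back to disk afterwards.
manager.save_monster("user-123", monster)          # monster: a Monster instance
same_monster = manager.get_current_monster("user-123")

# update_monster() hits disk at most every ~5 minutes, except when
# health < 30 or hunger < 20, so it is safe to call on every interaction.
manager.update_monster("user-123", same_monster)
```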
docs/HUNYUAN3D_INTEGRATION.md DELETED
@@ -1,264 +0,0 @@
- # Hunyuan3D-2.1 Integration for DigiPal
-
- ## Overview
-
- The Hunyuan3D pipeline integrates Tencent's state-of-the-art Hunyuan3D-2.1 model into DigiPal, providing advanced 3D generation capabilities for monster creation. This pipeline offers multiple generation modes, high-quality outputs, and seamless integration with the existing DigiPal monster system.
-
- ## Features
-
- ### Core Capabilities
-
- 1. **Text-to-3D Generation**
-    - Generate 3D models from text descriptions
-    - Automatic concept image generation
-    - Two-stage process: shape generation + texture generation
-
- 2. **Image-to-3D Generation**
-    - Convert single images to 3D models
-    - Automatic background removal
-    - Foreground ratio control for optimal results
-
- 3. **Multi-View Generation**
-    - Generate from front, back, left, and right views
-    - Higher accuracy than single-image input
-    - Ideal for complex monster designs
-
- ### Generation Modes
-
- - **Turbo Mode**: Fastest generation, suitable for prototypes and baby monsters
- - **Fast Mode**: Balanced speed and quality, ideal for most use cases
- - **Standard Mode**: Best quality, recommended for final assets
-
- ### Export Formats
-
- - **GLB** (recommended): GLTF binary format with embedded textures
- - **OBJ**: Wavefront format for compatibility
- - **PLY**: Point cloud format
- - **STL**: For 3D printing applications
-
- ### Texture Options
-
- - **RGB**: Standard color textures
- - **PBR**: Physically-based rendering with metallic, roughness, and normal maps
-
- ## Installation
-
- 1. Ensure you have the required dependencies:
- ```bash
- pip install gradio_client>=0.8.0 trimesh>=4.0.0 aiohttp>=3.9.0
- ```
-
- 2. The pipeline is located at: `src/pipelines/hunyuan3d_pipeline.py`
-
- ## Configuration
-
- Create a configuration file or use the default settings:
-
- ```python
- from src.pipelines.hunyuan3d_pipeline import Hunyuan3DConfig, GenerationMode, TextureMethod, ExportFormat
-
- config = Hunyuan3DConfig(
-     space_id="Tencent/Hunyuan3D-2",  # Official HF Space
-     default_mode=GenerationMode.FAST,
-     texture_method=TextureMethod.RGB,
-     export_format=ExportFormat.GLB,
-     target_polycount=30000,
-     enable_optimization=True
- )
- ```
-
- ## Usage Examples
-
- ### Basic Text-to-3D Generation
-
- ```python
- from src.pipelines.hunyuan3d_pipeline import Hunyuan3DPipeline, GenerationMode
-
- # Initialize pipeline
- pipeline = Hunyuan3DPipeline()
-
- # Generate from text
- result = await pipeline.generate_from_text(
-     prompt="cute blue dragon with big eyes and small wings",
-     name="BlueDragon",
-     mode=GenerationMode.FAST
- )
-
- if result["success"]:
-     print(f"Model saved at: {result['paths']['processed_model']}")
- ```
-
- ### Image-to-3D Generation
-
- ```python
- # Generate from a single image
- result = await pipeline.generate_from_image(
-     image_path="dragon_concept.png",
-     name="DragonFromImage",
-     mode=GenerationMode.STANDARD
- )
- ```
-
- ### Multi-View Generation
-
- ```python
- # Generate from multiple views for better accuracy
- views = {
-     "front": "dragon_front.png",
-     "back": "dragon_back.png",
-     "left": "dragon_left.png",
-     "right": "dragon_right.png"
- }
-
- result = await pipeline.generate_from_multi_view(
-     image_paths=views,
-     name="DragonMultiView",
-     mode=GenerationMode.STANDARD
- )
- ```
-
- ### DigiPal Monster Integration
-
- ```python
- from src.pipelines.hunyuan3d_pipeline import DigiPalHunyuan3DIntegration
-
- # Initialize integration
- integration = DigiPalHunyuan3DIntegration()
-
- # Generate model for a DigiPal monster
- result = await integration.generate_monster_model(
-     monster=my_monster,  # DW1Monster instance
-     force_regenerate=False
- )
- ```
-
- ## Integration with DigiPal System
-
- ### Automatic Monster Generation
-
- The pipeline automatically creates appropriate prompts based on monster attributes:
-
- - **Stage**: Determines model complexity (baby → ultimate)
- - **Personality**: Influences visual style and pose
- - **Species Type**: Affects color scheme and texture
- - **Stats**: High offense adds claws, high defense adds armor, etc.
-
- ### Evolution Support
-
- Generate complete evolution sequences:
-
- ```python
- # Generate all evolution stages
- results = await integration.generate_evolution_sequence(
-     monster=my_monster,
-     stages=["baby", "child", "adult", "perfect", "ultimate"]
- )
- ```
-
- ### Caching System
-
- - Models are automatically cached to avoid regeneration
- - Cache key is based on monster ID and evolution stage
- - Forced regeneration is available when needed
-
- ## Performance Optimization
-
- ### Model Optimization
-
- - Automatic mesh simplification to target polycount
- - Geometry cleanup (degenerate faces, duplicates)
- - UV optimization for efficient texture mapping
- - Watertight mesh generation for rigging
-
- ### Generation Speed
-
- - **Turbo Mode**: ~30-60 seconds
- - **Fast Mode**: ~1-2 minutes
- - **Standard Mode**: ~2-4 minutes
-
- ### Batch Processing
-
- Generate multiple models concurrently:
-
- ```python
- tasks = [
-     {"prompt": "fire dragon", "name": "FireDragon"},
-     {"prompt": "water turtle", "name": "WaterTurtle"},
-     {"prompt": "electric mouse", "name": "ElectricMouse"}
- ]
-
- results = await pipeline.batch_generate(
-     tasks,
-     max_concurrent=2  # Process 2 at a time
- )
- ```
-
- ## Comparison with Other Pipelines
-
- | Feature | Hunyuan3D | Meshy AI | Open Source |
- |---------|-----------|----------|-------------|
- | Text-to-3D | ✓ | ✓ | ✓ |
- | Image-to-3D | ✓ | Limited | ✓ |
- | Multi-View | ✓ | ✗ | ✓ |
- | Speed | Fast | Medium | Slow |
- | Quality | High | High | Medium |
- | PBR Textures | ✓ | ✓ | Limited |
- | API Cost | Free* | Paid | Free |
- | Auto-Rigging | ✗ | ✓ | ✓ |
-
- *Free via Hugging Face Spaces, subject to usage limits
-
- ## Best Practices
-
- ### Prompt Engineering
-
- 1. **Be Specific**: Include details about size, color, features
- 2. **Use T-Pose**: Add "T-pose" for better rigging compatibility
- 3. **Neutral Background**: Specify "neutral background" for cleaner results
- 4. **Game Asset**: Include "game character" or "game asset" for optimization
-
- ### Quality Settings
-
- - Use **Turbo** mode for rapid prototyping
- - Use **Fast** mode for development and testing
- - Use **Standard** mode for final production assets
-
- ### Multi-View Tips
-
- - Ensure consistent lighting across all views
- - Use the same background for all images
- - Maintain the same scale and position
- - Remove shadows for better reconstruction
-
- ## Troubleshooting
-
- ### Common Issues
-
- 1. **Connection Failed**: Check internet connection and HF Space availability
- 2. **Generation Timeout**: Reduce quality settings or use Turbo mode
- 3. **Low Quality Output**: Use Standard mode or provide better input images
- 4. **Missing Textures**: Ensure texture_method is set correctly
-
- ### Error Handling
-
- The pipeline includes comprehensive error handling:
- - Automatic retries with exponential backoff
- - Graceful fallbacks for failed generations
- - Detailed error messages in results
-
- ## Future Enhancements
-
- - [ ] Animation generation support
- - [ ] Advanced rigging integration
- - [ ] Real-time preview during generation
- - [ ] Custom texture painting
- - [ ] Physics simulation setup
- - [ ] LOD (Level of Detail) generation
-
- ## API Reference
-
- See the inline documentation in `src/pipelines/hunyuan3d_pipeline.py` for detailed API reference.
-
- ## Credits
-
- This integration uses Tencent's Hunyuan3D-2.1 model, available through Hugging Face Spaces. Special thanks to the Tencent team for making this technology accessible.
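The four prompt-engineering rules in the removed doc compress into a small helper. A minimal sketch; `build_monster_prompt` is a hypothetical name for illustration, not part of the pipeline's API:

```python
def build_monster_prompt(description: str) -> str:
    # Rules 2-4 from the checklist; rule 1 (be specific) is the caller's job.
    hints = ["T-pose", "neutral background", "game asset"]
    return f"{description}, {', '.join(hints)}"

# -> "cute blue dragon with big eyes, T-pose, neutral background, game asset"
print(build_monster_prompt("cute blue dragon with big eyes"))
```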
frontend/app.html DELETED
@@ -1,12 +0,0 @@
- <!doctype html>
- <html lang="en">
-   <head>
-     <meta charset="utf-8" />
-     <link rel="icon" href="%sveltekit.assets%/favicon.png" />
-     <meta name="viewport" content="width=device-width, initial-scale=1" />
-     %sveltekit.head%
-   </head>
-   <body data-sveltekit-preload-data="hover" class="bg-black text-white">
-     <div style="display: contents">%sveltekit.body%</div>
-   </body>
- </html>
frontend/frontend/.gitignore DELETED
@@ -1,23 +0,0 @@
- node_modules
-
- # Output
- .output
- .vercel
- .netlify
- .wrangler
- /.svelte-kit
- /build
-
- # OS
- .DS_Store
- Thumbs.db
-
- # Env
- .env
- .env.*
- !.env.example
- !.env.test
-
- # Vite
- vite.config.js.timestamp-*
- vite.config.ts.timestamp-*
frontend/frontend/.npmrc DELETED
@@ -1 +0,0 @@
- engine-strict=true
frontend/frontend/README.md DELETED
@@ -1,38 +0,0 @@
- # sv
-
- Everything you need to build a Svelte project, powered by [`sv`](https://github.com/sveltejs/cli).
-
- ## Creating a project
-
- If you're seeing this, you've probably already done this step. Congrats!
-
- ```bash
- # create a new project in the current directory
- npx sv create
-
- # create a new project in my-app
- npx sv create my-app
- ```
-
- ## Developing
-
- Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
-
- ```bash
- npm run dev
-
- # or start the server and open the app in a new browser tab
- npm run dev -- --open
- ```
-
- ## Building
-
- To create a production version of your app:
-
- ```bash
- npm run build
- ```
-
- You can preview the production build with `npm run preview`.
-
- > To deploy your app, you may need to install an [adapter](https://svelte.dev/docs/kit/adapters) for your target environment.
frontend/frontend/package.json DELETED
@@ -1,23 +0,0 @@
- {
-   "name": "frontend",
-   "private": true,
-   "version": "0.0.1",
-   "type": "module",
-   "scripts": {
-     "dev": "vite dev",
-     "build": "vite build",
-     "preview": "vite preview",
-     "prepare": "svelte-kit sync || echo ''",
-     "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
-     "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
-   },
-   "devDependencies": {
-     "@sveltejs/adapter-auto": "^6.0.0",
-     "@sveltejs/kit": "^2.16.0",
-     "@sveltejs/vite-plugin-svelte": "^5.0.0",
-     "svelte": "^5.0.0",
-     "svelte-check": "^4.0.0",
-     "typescript": "^5.0.0",
-     "vite": "^6.2.6"
-   }
- }
frontend/frontend/src/app.d.ts DELETED
@@ -1,13 +0,0 @@
- // See https://svelte.dev/docs/kit/types#app.d.ts
- // for information about these interfaces
- declare global {
-   namespace App {
-     // interface Error {}
-     // interface Locals {}
-     // interface PageData {}
-     // interface PageState {}
-     // interface Platform {}
-   }
- }
-
- export {};
frontend/frontend/src/app.html DELETED
@@ -1,12 +0,0 @@
- <!doctype html>
- <html lang="en">
-   <head>
-     <meta charset="utf-8" />
-     <link rel="icon" href="%sveltekit.assets%/favicon.png" />
-     <meta name="viewport" content="width=device-width, initial-scale=1" />
-     %sveltekit.head%
-   </head>
-   <body data-sveltekit-preload-data="hover">
-     <div style="display: contents">%sveltekit.body%</div>
-   </body>
- </html>
frontend/frontend/src/routes/+page.svelte DELETED
@@ -1,2 +0,0 @@
- <h1>Welcome to SvelteKit</h1>
- <p>Visit <a href="https://svelte.dev/docs/kit">svelte.dev/docs/kit</a> to read the documentation</p>
frontend/frontend/static/favicon.png DELETED
Binary file (1.57 kB)
 
frontend/frontend/svelte.config.js DELETED
@@ -1,18 +0,0 @@
- import adapter from '@sveltejs/adapter-auto';
- import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
- /** @type {import('@sveltejs/kit').Config} */
- const config = {
-   // Consult https://svelte.dev/docs/kit/integrations
-   // for more information about preprocessors
-   preprocess: vitePreprocess(),
-
-   kit: {
-     // adapter-auto only supports some environments, see https://svelte.dev/docs/kit/adapter-auto for a list.
-     // If your environment is not supported, or you settled on a specific environment, switch out the adapter.
-     // See https://svelte.dev/docs/kit/adapters for more information about adapters.
-     adapter: adapter()
-   }
- };
-
- export default config;
frontend/frontend/tsconfig.json DELETED
@@ -1,19 +0,0 @@
- {
-   "extends": "./.svelte-kit/tsconfig.json",
-   "compilerOptions": {
-     "allowJs": true,
-     "checkJs": true,
-     "esModuleInterop": true,
-     "forceConsistentCasingInFileNames": true,
-     "resolveJsonModule": true,
-     "skipLibCheck": true,
-     "sourceMap": true,
-     "strict": true,
-     "moduleResolution": "bundler"
-   }
-   // Path aliases are handled by https://svelte.dev/docs/kit/configuration#alias
-   // except $lib which is handled by https://svelte.dev/docs/kit/configuration#files
-   //
-   // If you want to overwrite includes/excludes, make sure to copy over the relevant includes/excludes
-   // from the referenced tsconfig.json - TypeScript does not merge them in
- }
frontend/frontend/vite.config.ts DELETED
@@ -1,6 +0,0 @@
- import { sveltekit } from '@sveltejs/kit/vite';
- import { defineConfig } from 'vite';
-
- export default defineConfig({
-   plugins: [sveltekit()]
- });
frontend/package.json DELETED
@@ -1,31 +0,0 @@
- {
-   "name": "digipal-frontend",
-   "version": "1.0.0",
-   "private": true,
-   "scripts": {
-     "dev": "vite dev",
-     "build": "vite build",
-     "preview": "vite preview",
-     "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
-     "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch"
-   },
-   "devDependencies": {
-     "@sveltejs/adapter-auto": "^3.0.0",
-     "@sveltejs/kit": "^2.0.0",
-     "@sveltejs/vite-plugin-svelte": "^3.0.0",
-     "@types/three": "^0.160.0",
-     "svelte": "^4.2.7",
-     "svelte-check": "^3.6.0",
-     "tslib": "^2.4.1",
-     "typescript": "^5.0.0",
-     "vite": "^5.0.3"
-   },
-   "dependencies": {
-     "@threlte/core": "^7.1.0",
-     "@threlte/extras": "^8.7.5",
-     "three": "^0.160.1",
-     "tailwindcss": "^3.4.0",
-     "autoprefixer": "^10.4.16",
-     "postcss": "^8.4.32"
-   }
- }
frontend/postcss.config.js DELETED
@@ -1,6 +0,0 @@
- export default {
-   plugins: {
-     tailwindcss: {},
-     autoprefixer: {},
-   },
- }
frontend/src/app.css DELETED
@@ -1,153 +0,0 @@
- @import url('https://fonts.googleapis.com/css2?family=Press+Start+2P&family=Inter:wght@400;500;600;700&display=swap');
- @tailwind base;
- @tailwind components;
- @tailwind utilities;
-
- @layer base {
-   body {
-     @apply bg-black text-white;
-   }
- }
-
- @layer components {
-   /* CRT effect */
-   .crt-effect {
-     position: relative;
-     overflow: hidden;
-   }
-
-   .crt-effect::before {
-     content: " ";
-     display: block;
-     position: absolute;
-     top: 0;
-     left: 0;
-     bottom: 0;
-     right: 0;
-     background: linear-gradient(
-       rgba(18, 16, 16, 0) 50%,
-       rgba(0, 0, 0, 0.25) 50%
-     );
-     background-size: 100% 2px;
-     z-index: 2;
-     pointer-events: none;
-     animation: scan-lines 8s linear infinite;
-   }
-
-   .crt-effect::after {
-     content: " ";
-     display: block;
-     position: absolute;
-     top: 0;
-     left: 0;
-     bottom: 0;
-     right: 0;
-     background: rgba(18, 16, 16, 0.1);
-     opacity: 0;
-     z-index: 2;
-     pointer-events: none;
-     animation: flicker 0.15s infinite;
-   }
-
-   /* Holographic button */
-   .holographic-button {
-     @apply relative px-4 py-2 font-pixel text-xs uppercase tracking-wider;
-     background: linear-gradient(45deg, #00CED1, #FF6B00, #00CED1);
-     background-size: 200% 200%;
-     animation: hologram 3s ease-in-out infinite;
-     clip-path: polygon(10% 0%, 100% 0%, 90% 100%, 0% 100%);
-   }
-
-   .holographic-button:hover {
-     @apply brightness-125;
-   }
-
-   /* Device frame */
-   .device-frame {
-     @apply relative bg-digipal-gray rounded-3xl p-8 shadow-2xl;
-     background-image:
-       radial-gradient(circle at 20% 80%, #FF6B00 0%, transparent 50%),
-       radial-gradient(circle at 80% 20%, #00CED1 0%, transparent 50%),
-       radial-gradient(circle at 40% 40%, #2D2D2D 0%, transparent 50%);
-   }
-
-   /* D-pad button */
-   .dpad-button {
-     @apply bg-gray-800 hover:bg-gray-700 active:bg-gray-900 transition-colors;
-     @apply border-2 border-gray-600;
-   }
-
-   /* Action button */
-   .action-button {
-     @apply rounded-full bg-gradient-to-br from-gray-700 to-gray-900;
-     @apply hover:from-gray-600 hover:to-gray-800 active:from-gray-800 active:to-black;
-     @apply transition-all duration-150 transform active:scale-95;
-     @apply shadow-lg;
-   }
- }
-
- @keyframes flicker {
-   0% { opacity: 0.27861; }
-   5% { opacity: 0.34769; }
-   10% { opacity: 0.23604; }
-   15% { opacity: 0.90626; }
-   20% { opacity: 0.18128; }
-   25% { opacity: 0.83891; }
-   30% { opacity: 0.65583; }
-   35% { opacity: 0.67807; }
-   40% { opacity: 0.26559; }
-   45% { opacity: 0.84693; }
-   50% { opacity: 0.96019; }
-   55% { opacity: 0.08594; }
-   60% { opacity: 0.20313; }
-   65% { opacity: 0.71988; }
-   70% { opacity: 0.53455; }
-   75% { opacity: 0.37288; }
-   80% { opacity: 0.71428; }
-   85% { opacity: 0.70419; }
-   90% { opacity: 0.7003; }
-   95% { opacity: 0.36108; }
-   100% { opacity: 0.24387; }
- }
frontend/src/routes/+layout.svelte DELETED
@@ -1,5 +0,0 @@
- <script>
-   import '../app.css';
- </script>
-
- <slot />
frontend/src/routes/+page.svelte DELETED
@@ -1,14 +0,0 @@
- <script lang="ts">
-   import Device from '$lib/components/Device.svelte';
-   import { monsterStore } from '$lib/stores/monsterStore';
-   import { onMount } from 'svelte';
-
-   onMount(() => {
-     // Initialize the app
-     monsterStore.initialize();
-   });
- </script>
-
- <main class="min-h-screen flex items-center justify-center bg-gradient-to-br from-gray-900 via-black to-gray-900">
-   <Device />
- </main>
frontend/svelte.config.js DELETED
@@ -1,14 +0,0 @@
- import adapter from '@sveltejs/adapter-auto';
- import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
-
- /** @type {import('@sveltejs/kit').Config} */
- const config = {
-   // Consult https://kit.svelte.dev/docs/integrations#preprocessors
-   preprocess: vitePreprocess(),
-
-   kit: {
-     adapter: adapter()
-   }
- };
-
- export default config;
frontend/tailwind.config.js DELETED
@@ -1,58 +0,0 @@
- /** @type {import('tailwindcss').Config} */
- export default {
-   content: ['./src/**/*.{html,js,svelte,ts}'],
-   theme: {
-     extend: {
-       colors: {
-         'digipal-orange': '#FF6B00',
-         'digipal-teal': '#00CED1',
-         'digipal-gray': '#2D2D2D',
-         'neon-magenta': '#FF00FF',
-         'neon-cyan': '#00FFFF',
-       },
-       fontFamily: {
-         'pixel': ['Press Start 2P', 'monospace'],
-         'modern': ['Inter', 'sans-serif'],
-       },
-       animation: {
-         'scan-lines': 'scan-lines 8s linear infinite',
-         'glitch': 'glitch 2s ease-in-out infinite alternate',
-         'hologram': 'hologram 3s ease-in-out infinite',
-       },
-       keyframes: {
-         'scan-lines': {
-           '0%': { transform: 'translateY(0)' },
-           '100%': { transform: 'translateY(100%)' },
-         },
-         'glitch': {
-           '0%': { textShadow: '0.05em 0 0 #00fffc, -0.03em -0.04em 0 #fc00ff, 0.025em 0.04em 0 #fffc00' },
-           '15%': { textShadow: '0.05em 0 0 #00fffc, -0.03em -0.04em 0 #fc00ff, 0.025em 0.04em 0 #fffc00' },
-           '16%': { textShadow: '-0.05em -0.025em 0 #00fffc, 0.025em 0.035em 0 #fc00ff, -0.05em -0.05em 0 #fffc00' },
-           '49%': { textShadow: '-0.05em -0.025em 0 #00fffc, 0.025em 0.035em 0 #fc00ff, -0.05em -0.05em 0 #fffc00' },
-           '50%': { textShadow: '0.05em 0.035em 0 #00fffc, 0.03em 0 0 #fc00ff, 0 -0.04em 0 #fffc00' },
-           '99%': { textShadow: '0.05em 0.035em 0 #00fffc, 0.03em 0 0 #fc00ff, 0 -0.04em 0 #fffc00' },
-           '100%': { textShadow: '-0.05em 0 0 #00fffc, -0.025em -0.04em 0 #fc00ff, -0.04em -0.025em 0 #fffc00' },
-         },
-         'hologram': {
-           '0%, 100%': { opacity: '1' },
-           '50%': { opacity: '0.7' },
-         },
-       },
-     },
-   },
-   plugins: [],
- }
frontend/tsconfig.json DELETED
@@ -1,13 +0,0 @@
- {
-   "extends": "./.svelte-kit/tsconfig.json",
-   "compilerOptions": {
-     "allowJs": true,
-     "checkJs": true,
-     "esModuleInterop": true,
-     "forceConsistentCasingInFileNames": true,
-     "resolveJsonModule": true,
-     "skipLibCheck": true,
-     "sourceMap": true,
-     "strict": true
-   }
- }
frontend/vite.config.ts DELETED
@@ -1,18 +0,0 @@
- import { sveltekit } from '@sveltejs/kit/vite';
- import { defineConfig } from 'vite';
-
- export default defineConfig({
-   plugins: [sveltekit()],
-   server: {
-     proxy: {
-       '/api': {
-         target: 'http://localhost:7861',
-         changeOrigin: true
-       },
-       '/api/ws': {
-         target: 'ws://localhost:7861',
-         ws: true
-       }
-     }
-   }
- });
game/__init__.py ADDED
@@ -0,0 +1 @@
+ # Game module initialization
models/__init__.py ADDED
@@ -0,0 +1 @@
+ # Model processors initialization
models/image_generator.py ADDED
@@ -0,0 +1,253 @@
+ import torch
+ from diffusers import DiffusionPipeline
+ from PIL import Image
+ import numpy as np
+ from typing import Optional, List, Union
+ import gc
+
+ class OmniGenImageGenerator:
+     """Image generation using OmniGen2 model"""
+
+     def __init__(self, device: str = "cuda"):
+         self.device = device if torch.cuda.is_available() else "cpu"
+         self.pipeline = None
+         self.model_id = "OmniGen2/OmniGen2"  # Placeholder - actual model path may differ
+
+         # Generation parameters
+         self.default_width = 512
+         self.default_height = 512
+         self.num_inference_steps = 30
+         self.guidance_scale = 7.5
+
+         # Memory optimization
+         self.enable_attention_slicing = True
+         self.enable_vae_slicing = True
+         self.enable_cpu_offload = self.device == "cuda"
+
+     def load_model(self):
+         """Lazy load the image generation model"""
+         if self.pipeline is None:
+             try:
+                 # Determine torch dtype
+                 torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+
+                 # Load pipeline with optimizations
+                 self.pipeline = DiffusionPipeline.from_pretrained(
+                     self.model_id,
+                     torch_dtype=torch_dtype,
+                     use_safetensors=True,
+                     variant="fp16" if self.device == "cuda" else None
+                 )
+
+                 # Apply optimizations
+                 if self.device == "cuda":
+                     if self.enable_cpu_offload:
+                         self.pipeline.enable_sequential_cpu_offload()
+                     else:
+                         self.pipeline = self.pipeline.to(self.device)
+
+                     if self.enable_attention_slicing:
+                         self.pipeline.enable_attention_slicing(1)
+
+                     if self.enable_vae_slicing:
+                         self.pipeline.enable_vae_slicing()
+                 else:
+                     self.pipeline = self.pipeline.to(self.device)
+
+                 # Compile for faster inference (if available)
+                 if hasattr(torch, 'compile') and self.device == "cuda":
+                     try:
+                         self.pipeline.unet = torch.compile(self.pipeline.unet, mode="reduce-overhead")
+                     except Exception:
+                         pass  # Compilation is optional
+
+             except Exception as e:
+                 print(f"Failed to load image generation model: {e}")
+                 # Try fallback to Stable Diffusion
+                 try:
+                     self.model_id = "runwayml/stable-diffusion-v1-5"
+                     self._load_fallback_model()
+                 except Exception:
+                     raise
+
+     def _load_fallback_model(self):
+         """Load fallback Stable Diffusion model"""
+         from diffusers import StableDiffusionPipeline
+
+         torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+
+         self.pipeline = StableDiffusionPipeline.from_pretrained(
+             self.model_id,
+             torch_dtype=torch_dtype,
+             use_safetensors=True
+         )
+
+         if self.device == "cuda" and self.enable_cpu_offload:
+             self.pipeline.enable_sequential_cpu_offload()
+         else:
+             self.pipeline = self.pipeline.to(self.device)
+
+     def generate(self,
+                  prompt: str,
+                  reference_images: Optional[List[Union[str, Image.Image]]] = None,
+                  negative_prompt: Optional[str] = None,
+                  width: Optional[int] = None,
+                  height: Optional[int] = None,
+                  num_images: int = 1,
+                  seed: Optional[int] = None) -> Union[Image.Image, List[Image.Image]]:
+         """Generate monster image from prompt"""
+
+         try:
+             # Load model if needed
+             self.load_model()
+
+             # Set dimensions
+             width = width or self.default_width
+             height = height or self.default_height
+
+             # Ensure dimensions are multiples of 8
+             width = (width // 8) * 8
+             height = (height // 8) * 8
+
+             # Enhance prompt for monster generation
+             enhanced_prompt = self._enhance_prompt(prompt)
+
+             # Default negative prompt for quality
+             if negative_prompt is None:
+                 negative_prompt = (
+                     "low quality, blurry, distorted, disfigured, "
+                     "bad anatomy, wrong proportions, ugly, duplicate, "
+                     "morbid, mutilated, extra limbs, malformed"
+                 )
+
+             # Set seed for reproducibility
+             generator = None
+             if seed is not None:
+                 generator = torch.Generator(device=self.device).manual_seed(seed)
+
+             # Generate images
+             with torch.no_grad():
+                 if hasattr(self.pipeline, '__call__'):
+                     # Standard diffusion pipeline
+                     images = self.pipeline(
+                         prompt=enhanced_prompt,
+                         negative_prompt=negative_prompt,
+                         width=width,
+                         height=height,
+                         num_inference_steps=self.num_inference_steps,
+                         guidance_scale=self.guidance_scale,
+                         num_images_per_prompt=num_images,
+                         generator=generator
+                     ).images
+                 else:
+                     # OmniGen specific generation (if different API)
+                     images = self._omnigen_generate(
+                         enhanced_prompt,
+                         reference_images,
+                         width,
+                         height,
+                         num_images
+                     )
+
+             # Clean up memory
+             if self.device == "cuda":
+                 torch.cuda.empty_cache()
+
+             # Return single image or list
+             if num_images == 1:
+                 return images[0]
+             return images
+
+         except Exception as e:
+             print(f"Image generation error: {e}")
+             # Return fallback image (width/height may still be None if we failed early)
+             return self._generate_fallback_image(width or self.default_width, height or self.default_height)
+
+     def _enhance_prompt(self, base_prompt: str) -> str:
+         """Enhance prompt for better monster generation"""
+         enhancements = [
+             "digital art",
+             "creature design",
+             "game character",
+             "detailed",
+             "vibrant colors",
+             "fantasy creature",
+             "high quality",
+             "professional artwork"
+         ]
+
+         # Combine base prompt with enhancements
+         enhanced = f"{base_prompt}, {', '.join(enhancements)}"
+
+         return enhanced
+
+     def _omnigen_generate(self, prompt: str, reference_images: Optional[List],
+                           width: int, height: int, num_images: int) -> List[Image.Image]:
+         """OmniGen specific generation with multimodal inputs"""
+         # This would be implemented based on OmniGen's specific API
+         # For now, fall back to standard generation
+         return self.pipeline(
+             prompt=prompt,
+             width=width,
+             height=height,
+             num_images_per_prompt=num_images
+         ).images
+
+     def _generate_fallback_image(self, width: int, height: int) -> Image.Image:
+         """Generate a fallback monster image"""
+         # Create a simple procedural monster image
+         img_array = np.zeros((height, width, 3), dtype=np.uint8)
+
+         # Add some basic shapes and colors
+         center_x, center_y = width // 2, height // 2
+         radius = min(width, height) // 3
+
+         # Create circular body
+         y, x = np.ogrid[:height, :width]
+         mask = (x - center_x)**2 + (y - center_y)**2 <= radius**2
+
+         # Random monster color
+         color = np.random.randint(50, 200, size=3)
+         img_array[mask] = color
+
+         # Add eyes
+         eye_y = center_y - radius // 3
+         eye_left_x = center_x - radius // 3
+         eye_right_x = center_x + radius // 3
+         eye_radius = radius // 8
+
+         # Left eye
+         eye_mask = (x - eye_left_x)**2 + (y - eye_y)**2 <= eye_radius**2
+         img_array[eye_mask] = [255, 255, 255]
+
+         # Right eye
+         eye_mask = (x - eye_right_x)**2 + (y - eye_y)**2 <= eye_radius**2
+         img_array[eye_mask] = [255, 255, 255]
+
+         # Convert to PIL Image
+         return Image.fromarray(img_array)
+
+     def edit_image(self,
+                    image: Union[str, Image.Image],
+                    prompt: str,
+                    mask: Optional[Union[str, Image.Image]] = None) -> Image.Image:
+         """Edit existing image (for future monster customization)"""
+         # This would implement image editing capabilities
+         raise NotImplementedError("Image editing not yet implemented")
+
+     def to(self, device: str):
+         """Move pipeline to specified device"""
+         self.device = device
+         if self.pipeline:
+             if device == "cuda" and self.enable_cpu_offload:
+                 self.pipeline.enable_sequential_cpu_offload()
+             else:
+                 self.pipeline = self.pipeline.to(device)
+
+     def __del__(self):
+         """Cleanup when object is destroyed"""
+         if self.pipeline:
+             del self.pipeline
+         gc.collect()
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
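A minimal usage sketch for `OmniGenImageGenerator` as defined above. The first `generate()` call triggers the lazy model load (or the Stable Diffusion fallback); the class and parameter names come from the code, the prompt and filename are illustrative.

```python
from models.image_generator import OmniGenImageGenerator

gen = OmniGenImageGenerator(device="cuda")

# Seeded generation is reproducible; width/height are snapped to multiples of 8.
image = gen.generate(
    prompt="small fire-breathing salamander monster",
    width=512,
    height=512,
    seed=42,
)
image.save("salamander.png")
```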
models/model_3d_generator.py ADDED
@@ -0,0 +1,283 @@
+ import torch
+ import numpy as np
+ from PIL import Image
+ import trimesh
+ import tempfile
+ from typing import Union, Optional, Dict, Any
+ from pathlib import Path
+ import os
+
+ class Hunyuan3DGenerator:
+     """3D model generation using Hunyuan3D-2.1"""
+
+     def __init__(self, device: str = "cuda"):
+         self.device = device if torch.cuda.is_available() else "cpu"
+         self.model = None
+         self.preprocessor = None
+
+         # Model configuration
+         self.model_id = "tencent/Hunyuan3D-2.1"
+         self.lite_model_id = "tencent/Hunyuan3D-2.1-Lite"  # For low VRAM
+
+         # Generation parameters
+         self.num_inference_steps = 50
+         self.guidance_scale = 7.5
+         self.resolution = 256  # 3D resolution
+
+         # Use lite model for low VRAM
+         self.use_lite = self.device == "cpu" or not self._check_vram()
+
+     def _check_vram(self) -> bool:
+         """Check if we have enough VRAM for the full model"""
+         if not torch.cuda.is_available():
+             return False
+
+         try:
+             vram = torch.cuda.get_device_properties(0).total_memory
+             # Need at least 12GB for full model
+             return vram > 12 * 1024 * 1024 * 1024
+         except Exception:
+             return False
+
+     def load_model(self):
+         """Lazy load the 3D generation model"""
+         if self.model is None:
+             try:
+                 # Import Hunyuan3D components
+                 from transformers import AutoModel, AutoProcessor
+
+                 model_id = self.lite_model_id if self.use_lite else self.model_id
+
+                 # Load preprocessor
+                 self.preprocessor = AutoProcessor.from_pretrained(model_id)
+
+                 # Load model with optimizations
+                 torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
+
+                 self.model = AutoModel.from_pretrained(
+                     model_id,
+                     torch_dtype=torch_dtype,
+                     low_cpu_mem_usage=True,
+                     device_map="auto" if self.device == "cuda" else None
+                 )
+
+                 if self.device == "cpu":
+                     self.model = self.model.to(self.device)
+
+                 # Enable optimizations
+                 if hasattr(self.model, 'enable_attention_slicing'):
+                     self.model.enable_attention_slicing()
+
+             except Exception as e:
+                 print(f"Failed to load Hunyuan3D model: {e}")
+                 # Model loading failed, will use fallback
+                 self.model = "fallback"
+
+     def image_to_3d(self,
+                     image: Union[str, Image.Image, np.ndarray],
+                     remove_background: bool = True,
+                     texture_resolution: int = 1024) -> Union[str, trimesh.Trimesh]:
+         """Convert 2D image to 3D model"""
+
+         try:
+             # Load model if needed
+             if self.model is None:
+                 self.load_model()
+
+             # If model loading failed, use fallback
+             if self.model == "fallback":
+                 return self._generate_fallback_3d(image)
+
+             # Prepare image
+             if isinstance(image, str):
+                 image = Image.open(image)
+             elif isinstance(image, np.ndarray):
+                 image = Image.fromarray(image)
+
+             # Ensure RGB
+             if image.mode != 'RGB':
+                 image = image.convert('RGB')
+
+             # Resize for processing
+             image = image.resize((512, 512), Image.Resampling.LANCZOS)
+
+             # Remove background if requested
+             if remove_background:
+                 image = self._remove_background(image)
+
+             # Process with model
+             with torch.no_grad():
+                 # Preprocess image
+                 inputs = self.preprocessor(images=image, return_tensors="pt").to(self.device)
+
+                 # Generate 3D
+                 outputs = self.model.generate(
+                     **inputs,
+                     num_inference_steps=self.num_inference_steps,
+                     guidance_scale=self.guidance_scale,
+                     texture_resolution=texture_resolution
+                 )
+
+             # Extract mesh
+             mesh = self._extract_mesh(outputs)
+
+             # Save mesh
+             mesh_path = self._save_mesh(mesh)
+
+             return mesh_path
+
+         except Exception as e:
+             print(f"3D generation error: {e}")
+             return self._generate_fallback_3d(image)
+
+     def _remove_background(self, image: Image.Image) -> Image.Image:
+         """Remove background from image"""
+         try:
+             # Try using rembg if available
+             from rembg import remove
+             return remove(image)
+         except Exception:
+             # Fallback: simple background removal
+             # Convert to RGBA
+             image = image.convert("RGBA")
+
+             # Simple white background removal
+             datas = image.getdata()
+             new_data = []
+
+             for item in datas:
+                 # Remove white-ish backgrounds
+                 if item[0] > 230 and item[1] > 230 and item[2] > 230:
+                     new_data.append((255, 255, 255, 0))
+                 else:
+                     new_data.append(item)
+
+             image.putdata(new_data)
+             return image
+
+     def _extract_mesh(self, model_outputs: Dict[str, Any]) -> trimesh.Trimesh:
+         """Extract mesh from model outputs"""
+         # This would depend on actual Hunyuan3D output format
+         # Placeholder implementation
+
+         if 'vertices' in model_outputs and 'faces' in model_outputs:
+             vertices = model_outputs['vertices'].cpu().numpy()
+             faces = model_outputs['faces'].cpu().numpy()
+
+             # Create trimesh object
+             mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+
+             # Add texture if available
+             if 'texture' in model_outputs:
+                 # Apply texture to mesh
+                 pass
+
+             return mesh
+         else:
+             # Create a simple mesh if outputs are different
+             return self._create_simple_mesh()
+
+     def _create_simple_mesh(self) -> trimesh.Trimesh:
+         """Create a simple placeholder mesh"""
+         # Create a simple sphere as placeholder
+         mesh = trimesh.creation.icosphere(subdivisions=3, radius=1.0)
+
+         # Add some variation
+         mesh.vertices += np.random.normal(0, 0.05, mesh.vertices.shape)
+
+         # Smooth the mesh
+         mesh = mesh.smoothed()
+
+         return mesh
+
+     def _generate_fallback_3d(self, image: Union[str, Image.Image, np.ndarray]) -> str:
+         """Generate fallback 3D model when main model fails"""
+
+         # Create a simple 3D representation based on image
+         if isinstance(image, np.ndarray):
+             image = Image.fromarray(image)
+         elif isinstance(image, str):
+             image = Image.open(image)
+
+         # Analyze image for basic shape
+         image_array = np.array(image.resize((64, 64)))
+
+         # Create height map from image brightness
+         gray = np.mean(image_array, axis=2)
+         height_map = gray / 255.0
+
+         # Create mesh from height map
+         mesh = self._heightmap_to_mesh(height_map)
+
+         # Save and return path
+         return self._save_mesh(mesh)
+
+     def _heightmap_to_mesh(self, heightmap: np.ndarray) -> trimesh.Trimesh:
+         """Convert heightmap to 3D mesh"""
+         h, w = heightmap.shape
+
+         # Create vertices
+         vertices = []
+         faces = []
+
+         # Create vertex grid
+         for i in range(h):
+             for j in range(w):
+                 x = (j - w/2) / w * 2
+                 y = (i - h/2) / h * 2
+                 z = heightmap[i, j] * 0.5
+                 vertices.append([x, y, z])
+
+         # Create faces
+         for i in range(h-1):
+             for j in range(w-1):
+                 # Two triangles per grid square
+                 v1 = i * w + j
+                 v2 = v1 + 1
+                 v3 = v1 + w
+                 v4 = v3 + 1
+
+                 faces.append([v1, v2, v3])
+                 faces.append([v2, v4, v3])
+
+         vertices = np.array(vertices)
+         faces = np.array(faces)
+
+         # Create mesh
+         mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+
+         # Apply smoothing
+         mesh = mesh.smoothed()
+
+         return mesh
+
+     def _save_mesh(self, mesh: trimesh.Trimesh) -> str:
+         """Save mesh to file"""
+         # Create temporary file
+         with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
+             mesh_path = tmp.name
+
+         # Export mesh
+         mesh.export(mesh_path)
+
+         return mesh_path
+
+     def text_to_3d(self, text_prompt: str) -> str:
+         """Generate 3D model from text description"""
+         # First generate image, then convert to 3D
+         # This would require image generator integration
+         raise NotImplementedError("Text to 3D requires image generation first")
+
+     def to(self, device: str):
+         """Move model to specified device"""
+         self.device = device
+         if self.model and self.model != "fallback":
+             self.model.to(device)
+
+     def __del__(self):
+         """Cleanup when object is destroyed"""
+         if self.model and self.model != "fallback":
+             del self.model
+         if self.preprocessor:
+             del self.preprocessor
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
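The intended 2D-to-3D handoff, as a sketch. Per the code above, `image_to_3d` returns a temporary `.glb` path whether the Hunyuan3D model loaded or the heightmap fallback was used, so downstream consumers such as the rigging processor below need not care which branch ran. The input filename is illustrative.

```python
from models.model_3d_generator import Hunyuan3DGenerator

gen3d = Hunyuan3DGenerator(device="cuda")  # drops to CPU / lite model as needed
mesh_path = gen3d.image_to_3d("salamander.png", remove_background=True)
print(mesh_path)                           # path to a temporary .glb file
```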
models/rigging_processor.py ADDED
@@ -0,0 +1,546 @@
1
+ import numpy as np
2
+ import trimesh
3
+ from typing import Union, Dict, List, Tuple, Optional
4
+ import tempfile
5
+ from pathlib import Path
6
+
7
+ class UniRigProcessor:
8
+ """Automatic rigging for 3D models using simplified UniRig approach"""
9
+
10
+ def __init__(self, device: str = "cuda"):
11
+ self.device = device
12
+ self.model = None
13
+
14
+ # Rigging parameters
15
+ self.bone_detection_threshold = 0.1
16
+ self.max_bones = 20
17
+ self.min_bones = 5
18
+
19
+ # Animation presets for monsters
20
+ self.animation_presets = {
21
+ 'idle': self._create_idle_animation,
22
+ 'walk': self._create_walk_animation,
23
+ 'attack': self._create_attack_animation,
24
+ 'happy': self._create_happy_animation
25
+ }
26
+
27
+ def load_model(self):
28
+ """Load rigging model (placeholder for actual implementation)"""
29
+ # In production, this would load the actual UniRig model
30
+ # For now, we'll use procedural rigging
31
+ self.model = "procedural"
32
+
33
+ def rig_mesh(self,
34
+ mesh: Union[str, trimesh.Trimesh],
35
+ mesh_type: str = "monster") -> Dict[str, any]:
36
+ """Add rigging to a 3D mesh"""
37
+
38
+ try:
39
+ # Load mesh if path provided
40
+ if isinstance(mesh, str):
41
+ mesh = trimesh.load(mesh)
42
+
43
+ # Ensure model is loaded
44
+ if self.model is None:
45
+ self.load_model()
46
+
47
+ # Analyze mesh structure
48
+ mesh_analysis = self._analyze_mesh(mesh)
49
+
50
+ # Generate skeleton
51
+ skeleton = self._generate_skeleton(mesh, mesh_analysis)
52
+
53
+ # Compute bone weights
54
+ weights = self._compute_bone_weights(mesh, skeleton)
55
+
56
+ # Create rigged model
57
+ rigged_model = {
58
+ 'mesh': mesh,
59
+ 'skeleton': skeleton,
60
+ 'weights': weights,
61
+ 'animations': self._create_default_animations(skeleton),
62
+ 'metadata': {
63
+ 'mesh_type': mesh_type,
64
+ 'bone_count': len(skeleton['bones']),
65
+ 'vertex_count': len(mesh.vertices)
66
+ }
67
+ }
68
+
69
+ # Save rigged model
70
+ output_path = self._save_rigged_model(rigged_model)
71
+
72
+ return output_path
73
+
74
+ except Exception as e:
75
+ print(f"Rigging error: {e}")
76
+ # Return original mesh if rigging fails
77
+ return self._save_mesh_without_rigging(mesh)
78
+
79
+ def _analyze_mesh(self, mesh: trimesh.Trimesh) -> Dict[str, any]:
80
+ """Analyze mesh structure for rigging"""
81
+
82
+ # Get mesh bounds and center
83
+ bounds = mesh.bounds
84
+ center = mesh.centroid
85
+
86
+ # Analyze mesh topology
87
+ analysis = {
88
+ 'bounds': bounds,
89
+ 'center': center,
90
+ 'height': bounds[1][2] - bounds[0][2],
91
+ 'width': bounds[1][0] - bounds[0][0],
92
+ 'depth': bounds[1][1] - bounds[0][1],
93
+ 'is_symmetric': self._check_symmetry(mesh),
94
+ 'detected_limbs': self._detect_limbs(mesh),
95
+ 'mesh_type': self._classify_mesh_type(mesh)
96
+ }
97
+
98
+ return analysis
99
+
100
+ def _check_symmetry(self, mesh: trimesh.Trimesh) -> bool:
101
+ """Check if mesh is roughly symmetric"""
102
+ # Simple check: compare left and right halves
103
+ vertices = mesh.vertices
104
+ center_x = mesh.centroid[0]
105
+
106
+ left_verts = vertices[vertices[:, 0] < center_x]
107
+ right_verts = vertices[vertices[:, 0] > center_x]
108
+
109
+ # Check if similar number of vertices on each side
110
+ ratio = len(left_verts) / (len(right_verts) + 1)
111
+ return 0.8 < ratio < 1.2
112
+
113
+ def _detect_limbs(self, mesh: trimesh.Trimesh) -> List[Dict]:
114
+ """Detect potential limbs in the mesh"""
115
+ # Simplified limb detection using vertex clustering
116
+ from sklearn.cluster import DBSCAN
117
+
118
+ limbs = []
119
+
120
+ try:
121
+ # Cluster vertices to find distinct parts
122
+ clustering = DBSCAN(eps=0.1, min_samples=10).fit(mesh.vertices)
123
+
124
+ # Analyze each cluster
125
+ for label in set(clustering.labels_):
126
+ if label == -1: # Noise
127
+ continue
128
+
129
+ cluster_verts = mesh.vertices[clustering.labels_ == label]
130
+
131
+ # Check if cluster could be a limb
132
+ cluster_bounds = np.array([cluster_verts.min(axis=0), cluster_verts.max(axis=0)])
133
+ dimensions = cluster_bounds[1] - cluster_bounds[0]
134
+
135
+ # Limbs are typically elongated
136
+ if max(dimensions) / (min(dimensions) + 0.001) > 2:
137
+ limbs.append({
138
+ 'center': cluster_verts.mean(axis=0),
139
+ 'direction': dimensions,
140
+ 'size': len(cluster_verts)
141
+ })
142
+ except:
143
+ # Fallback if clustering fails
144
+ pass
145
+
146
+ return limbs
147
+
148
+ def _classify_mesh_type(self, mesh: trimesh.Trimesh) -> str:
149
+ """Classify the type of creature mesh"""
150
+ analysis = {
151
+ 'height': mesh.bounds[1][2] - mesh.bounds[0][2],
152
+ 'width': mesh.bounds[1][0] - mesh.bounds[0][0],
153
+ 'depth': mesh.bounds[1][1] - mesh.bounds[0][1]
154
+ }
155
+
156
+ # Simple classification based on proportions
157
+ aspect_ratio = analysis['height'] / max(analysis['width'], analysis['depth'])
158
+
159
+ if aspect_ratio > 1.5:
160
+ return 'bipedal' # Tall creatures
161
+ elif aspect_ratio < 0.7:
162
+ return 'quadruped' # Wide creatures
163
+ else:
164
+ return 'hybrid' # Mixed proportions
165
+
166
+ def _generate_skeleton(self, mesh: trimesh.Trimesh, analysis: Dict) -> Dict:
167
+ """Generate skeleton for the mesh"""
168
+
169
+ skeleton = {
170
+ 'bones': [],
171
+ 'hierarchy': {},
172
+ 'bind_poses': []
173
+ }
174
+
175
+ # Create root bone at center
176
+ root_pos = analysis['center']
177
+ root_bone = {
178
+ 'id': 0,
179
+ 'name': 'root',
180
+ 'position': root_pos,
181
+ 'parent': -1,
182
+ 'children': []
183
+ }
184
+ skeleton['bones'].append(root_bone)
185
+
186
+ # Generate bones based on mesh type
187
+ mesh_type = analysis['mesh_type']
188
+
189
+ if mesh_type == 'bipedal':
190
+ skeleton = self._generate_bipedal_skeleton(mesh, skeleton, analysis)
191
+ elif mesh_type == 'quadruped':
192
+ skeleton = self._generate_quadruped_skeleton(mesh, skeleton, analysis)
193
+ else:
194
+ skeleton = self._generate_hybrid_skeleton(mesh, skeleton, analysis)
195
+
196
+ # Build hierarchy
197
+ for bone in skeleton['bones']:
198
+ if bone['parent'] >= 0:
199
+ skeleton['bones'][bone['parent']]['children'].append(bone['id'])
200
+
201
+ return skeleton
202
+
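Because parent bones are always appended before their children, `bones[0]` is the root and the hierarchy can be walked depth-first. A small consumer-side traversal sketch (not part of the processor itself):

```python
def print_hierarchy(skeleton: dict, bone_id: int = 0, depth: int = 0) -> None:
    # Depth-first dump of the bone tree built by _generate_skeleton.
    bone = skeleton['bones'][bone_id]
    print('  ' * depth + bone['name'])
    for child_id in bone['children']:
        print_hierarchy(skeleton, child_id, depth + 1)
```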
203
+ def _generate_bipedal_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
204
+ """Generate skeleton for bipedal creature"""
205
+
206
+ bounds = analysis['bounds']
207
+ center = analysis['center']
208
+ height = analysis['height']
209
+
210
+ # Spine bones
211
+ spine_positions = [
212
+ center + [0, 0, -height * 0.4], # Hips
213
+ center + [0, 0, 0], # Chest
214
+ center + [0, 0, height * 0.3] # Head
215
+ ]
216
+
217
+ parent_id = 0
218
+ for i, pos in enumerate(spine_positions):
219
+ bone = {
220
+ 'id': len(skeleton['bones']),
221
+ 'name': ['hips', 'chest', 'head'][i],
222
+ 'position': pos,
223
+ 'parent': parent_id,
224
+ 'children': []
225
+ }
226
+ skeleton['bones'].append(bone)
227
+ parent_id = bone['id']
228
+
229
+ # Add limbs
230
+ chest_id = skeleton['bones'][2]['id'] # Chest bone
231
+ hips_id = skeleton['bones'][1]['id'] # Hips bone
232
+
233
+ # Arms
234
+ arm_offset = analysis['width'] * 0.4
235
+ for side, offset in [('left', -arm_offset), ('right', arm_offset)]:
236
+ shoulder_pos = skeleton['bones'][chest_id]['position'] + [offset, 0, 0]
237
+ elbow_pos = shoulder_pos + [offset * 0.5, 0, -height * 0.2]
238
+
239
+ # Shoulder
240
+ shoulder = {
241
+ 'id': len(skeleton['bones']),
242
+ 'name': f'{side}_shoulder',
243
+ 'position': shoulder_pos,
244
+ 'parent': chest_id,
245
+ 'children': []
246
+ }
247
+ skeleton['bones'].append(shoulder)
248
+
249
+ # Elbow/Hand
250
+ hand = {
251
+ 'id': len(skeleton['bones']),
252
+ 'name': f'{side}_hand',
253
+ 'position': elbow_pos,
254
+ 'parent': shoulder['id'],
255
+ 'children': []
256
+ }
257
+ skeleton['bones'].append(hand)
258
+
259
+ # Legs
260
+ for side, offset in [('left', -arm_offset * 0.5), ('right', arm_offset * 0.5)]:
261
+ hip_pos = skeleton['bones'][hips_id]['position'] + [offset, 0, 0]
262
+ foot_pos = hip_pos + [0, 0, -height * 0.4]
263
+
264
+ # Leg
265
+ leg = {
266
+ 'id': len(skeleton['bones']),
267
+ 'name': f'{side}_leg',
268
+ 'position': hip_pos,
269
+ 'parent': hips_id,
270
+ 'children': []
271
+ }
272
+ skeleton['bones'].append(leg)
273
+
274
+ # Foot
275
+ foot = {
276
+ 'id': len(skeleton['bones']),
277
+ 'name': f'{side}_foot',
278
+ 'position': foot_pos,
279
+ 'parent': leg['id'],
280
+ 'children': []
281
+ }
282
+ skeleton['bones'].append(foot)
283
+
284
+ return skeleton
285
+
286
+ def _generate_quadruped_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
287
+ """Generate skeleton for quadruped creature"""
288
+
289
+ # Similar to bipedal but with 4 legs and horizontal spine
290
+ center = analysis['center']
291
+ width = analysis['width']
292
+ depth = analysis['depth']
293
+
294
+ # Spine (horizontal)
295
+ spine_positions = [
296
+ center + [-width * 0.3, 0, 0], # Tail
297
+ center, # Body
298
+ center + [width * 0.3, 0, 0] # Head
299
+ ]
300
+
301
+ parent_id = 0
302
+ for i, pos in enumerate(spine_positions):
303
+ bone = {
304
+ 'id': len(skeleton['bones']),
305
+ 'name': ['tail', 'body', 'head'][i],
306
+ 'position': pos,
307
+ 'parent': parent_id,
308
+ 'children': []
309
+ }
310
+ skeleton['bones'].append(bone)
311
+ parent_id = bone['id'] if i < 2 else skeleton['bones'][1]['id']
312
+
313
+ # Add 4 legs
314
+ body_id = skeleton['bones'][1]['id']
315
+
316
+ for front_back, x_offset in [('front', width * 0.2), ('back', -width * 0.2)]:
317
+ for side, z_offset in [('left', -depth * 0.3), ('right', depth * 0.3)]:
318
+ leg_pos = skeleton['bones'][body_id]['position'] + [x_offset, -analysis['height'] * 0.3, z_offset]
319
+
320
+ leg = {
321
+ 'id': len(skeleton['bones']),
322
+ 'name': f'{front_back}_{side}_leg',
323
+ 'position': leg_pos,
324
+ 'parent': body_id,
325
+ 'children': []
326
+ }
327
+ skeleton['bones'].append(leg)
328
+
329
+ return skeleton
330
+
331
+ def _generate_hybrid_skeleton(self, mesh: trimesh.Trimesh, skeleton: Dict, analysis: Dict) -> Dict:
332
+ """Generate skeleton for hybrid creature"""
333
+ # Mix of bipedal and quadruped features
334
+ # For simplicity, use bipedal as base
335
+ return self._generate_bipedal_skeleton(mesh, skeleton, analysis)
336
+
337
+ def _compute_bone_weights(self, mesh: trimesh.Trimesh, skeleton: Dict) -> np.ndarray:
338
+ """Compute bone weights for vertices"""
339
+
340
+ num_vertices = len(mesh.vertices)
341
+ num_bones = len(skeleton['bones'])
342
+
343
+ # Initialize weights matrix
344
+ weights = np.zeros((num_vertices, num_bones))
345
+
346
+ # For each vertex, compute influence from each bone
347
+ for v_idx, vertex in enumerate(mesh.vertices):
348
+ total_weight = 0
349
+
350
+ for b_idx, bone in enumerate(skeleton['bones']):
351
+ # Distance-based weight
352
+ distance = np.linalg.norm(vertex - bone['position'])
353
+
354
+ # Inverse distance weight with falloff
355
+ weight = 1.0 / (distance + 0.1)
356
+ weights[v_idx, b_idx] = weight
357
+ total_weight += weight
358
+
359
+ # Normalize weights
360
+ if total_weight > 0:
361
+ weights[v_idx] /= total_weight
362
+
363
+ # Keep only top 4 influences per vertex (standard for game engines)
364
+ top_4 = np.argsort(weights[v_idx])[-4:]
365
+ mask = np.zeros(num_bones, dtype=bool)
366
+ mask[top_4] = True
367
+ weights[v_idx, ~mask] = 0
368
+
369
+ # Re-normalize
370
+ if weights[v_idx].sum() > 0:
371
+ weights[v_idx] /= weights[v_idx].sum()
372
+
373
+ return weights
374
+
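Two invariants follow from the code above: each row of the weight matrix sums to 1 after re-normalization, and at most four bones influence any vertex. A quick check, useful before handing the weights to a game engine:

```python
import numpy as np

def weights_are_valid(weights: np.ndarray) -> bool:
    # Rows sum to ~1 and carry at most 4 non-zero influences each.
    rows_ok = np.allclose(weights.sum(axis=1), 1.0, atol=1e-5)
    influences_ok = bool(((weights > 0).sum(axis=1) <= 4).all())
    return bool(rows_ok) and influences_ok
```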
375
+ def _create_default_animations(self, skeleton: Dict) -> Dict[str, List]:
376
+ """Create default animations for the skeleton"""
377
+
378
+ animations = {}
379
+
380
+ # Create basic animation sets
381
+ for anim_name, anim_func in self.animation_presets.items():
382
+ animations[anim_name] = anim_func(skeleton)
383
+
384
+ return animations
385
+
386
+ def _create_idle_animation(self, skeleton: Dict) -> List[Dict]:
387
+ """Create idle animation keyframes"""
388
+
389
+ keyframes = []
390
+
391
+ # Simple breathing/bobbing motion
392
+ for t in np.linspace(0, 2 * np.pi, 30):
393
+ frame = {
394
+ 'time': t / (2 * np.pi),
395
+ 'bones': {}
396
+ }
397
+
398
+ # Subtle movement for each bone
399
+ for bone in skeleton['bones']:
400
+ if 'chest' in bone['name'] or 'body' in bone['name']:
401
+ # Breathing motion
402
+ offset = np.sin(t) * 0.02
403
+ frame['bones'][bone['id']] = {
404
+ 'position': bone['position'] + [0, offset, 0],
405
+ 'rotation': [0, 0, 0, 1] # Quaternion
406
+ }
407
+ else:
408
+ # No movement
409
+ frame['bones'][bone['id']] = {
410
+ 'position': bone['position'],
411
+ 'rotation': [0, 0, 0, 1]
412
+ }
413
+
414
+ keyframes.append(frame)
415
+
416
+ return keyframes
417
+
418
+ def _create_walk_animation(self, skeleton: Dict) -> List[Dict]:
419
+ """Create walk animation keyframes"""
420
+ # Simplified walk cycle
421
+ keyframes = []
422
+
423
+ for t in np.linspace(0, 2 * np.pi, 60):
424
+ frame = {
425
+ 'time': t / (2 * np.pi),
426
+ 'bones': {}
427
+ }
428
+
429
+ # Animate legs with sine waves
430
+ for bone in skeleton['bones']:
431
+ if 'leg' in bone['name'] or 'foot' in bone['name']:
432
+ # Alternating leg movement
433
+ phase = 0 if 'left' in bone['name'] else np.pi
434
+ offset = np.sin(t + phase) * 0.1
435
+
436
+ frame['bones'][bone['id']] = {
437
+ 'position': bone['position'] + [offset, 0, 0],
438
+ 'rotation': [0, 0, 0, 1]
439
+ }
440
+ else:
441
+ frame['bones'][bone['id']] = {
442
+ 'position': bone['position'],
443
+ 'rotation': [0, 0, 0, 1]
444
+ }
445
+
446
+ keyframes.append(frame)
447
+
448
+ return keyframes
449
+
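Keyframes are stored as normalized-time snapshots, so playback needs interpolation between neighbors. A minimal consumer-side sketch (linear interpolation is an assumption; the processor itself only stores the keyframe list):

```python
import numpy as np

def sample_position(keyframes: list, bone_id: int, t: float) -> np.ndarray:
    # Linearly interpolate a bone position between the two nearest keyframes.
    times = [kf['time'] for kf in keyframes]
    i = max(int(np.searchsorted(times, t)) - 1, 0)
    j = min(i + 1, len(keyframes) - 1)
    k0, k1 = keyframes[i], keyframes[j]
    span = (k1['time'] - k0['time']) or 1.0
    alpha = (t - k0['time']) / span
    p0 = np.asarray(k0['bones'][bone_id]['position'])
    p1 = np.asarray(k1['bones'][bone_id]['position'])
    return (1 - alpha) * p0 + alpha * p1
```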
450
+ def _create_attack_animation(self, skeleton: Dict) -> List[Dict]:
451
+ """Create attack animation keyframes"""
452
+ # Quick strike motion
453
+ keyframes = []
454
+
455
+ # Wind up
456
+ for t in np.linspace(0, 0.3, 10):
457
+ frame = {'time': t, 'bones': {}}
458
+ for bone in skeleton['bones']:
459
+ frame['bones'][bone['id']] = {
460
+ 'position': bone['position'],
461
+ 'rotation': [0, 0, 0, 1]
462
+ }
463
+ keyframes.append(frame)
464
+
465
+ # Strike
466
+ for t in np.linspace(0.3, 0.5, 5):
467
+ frame = {'time': t, 'bones': {}}
468
+ for bone in skeleton['bones']:
469
+ if 'hand' in bone['name'] or 'head' in bone['name']:
470
+ # Forward motion
471
+ offset = (t - 0.3) * 0.5
472
+ frame['bones'][bone['id']] = {
473
+ 'position': bone['position'] + [offset, 0, 0],
474
+ 'rotation': [0, 0, 0, 1]
475
+ }
476
+ else:
477
+ frame['bones'][bone['id']] = {
478
+ 'position': bone['position'],
479
+ 'rotation': [0, 0, 0, 1]
480
+ }
481
+ keyframes.append(frame)
482
+
483
+ # Return
484
+ for t in np.linspace(0.5, 1.0, 10):
485
+ frame = {'time': t, 'bones': {}}
486
+ for bone in skeleton['bones']:
487
+ frame['bones'][bone['id']] = {
488
+ 'position': bone['position'],
489
+ 'rotation': [0, 0, 0, 1]
490
+ }
491
+ keyframes.append(frame)
492
+
493
+ return keyframes
494
+
495
+ def _create_happy_animation(self, skeleton: Dict) -> List[Dict]:
496
+ """Create happy/excited animation keyframes"""
497
+ # Jumping or bouncing motion
498
+ keyframes = []
499
+
500
+ for t in np.linspace(0, 2 * np.pi, 40):
501
+ frame = {
502
+ 'time': t / (2 * np.pi),
503
+ 'bones': {}
504
+ }
505
+
506
+ # Bouncing motion
507
+ bounce = abs(np.sin(t * 2)) * 0.1
508
+
509
+ for bone in skeleton['bones']:
510
+ frame['bones'][bone['id']] = {
511
+ 'position': bone['position'] + [0, bounce, 0],
512
+ 'rotation': [0, 0, 0, 1]
513
+ }
514
+
515
+ keyframes.append(frame)
516
+
517
+ return keyframes
518
+
519
+ def _save_rigged_model(self, rigged_model: Dict) -> str:
520
+ """Save rigged model to file"""
521
+
522
+ # Create temporary file
523
+ with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
524
+ output_path = tmp.name
525
+
526
+ # In production, this would export the rigged model with animations
527
+ # For now, just save the mesh
528
+ rigged_model['mesh'].export(output_path)
529
+
530
+ return output_path
531
+
532
+ def _save_mesh_without_rigging(self, mesh: Union[str, trimesh.Trimesh]) -> str:
533
+ """Save mesh without rigging as fallback"""
534
+
535
+ if isinstance(mesh, str):
536
+ return mesh
537
+
538
+ with tempfile.NamedTemporaryFile(suffix='.glb', delete=False) as tmp:
539
+ output_path = tmp.name
540
+
541
+ mesh.export(output_path)
542
+ return output_path
543
+
544
+ def to(self, device: str):
545
+ """Move model to specified device (compatibility method)"""
546
+ self.device = device
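Taken together, the methods above form a mesh → skeleton → weights → animations pipeline. A hedged usage sketch (the `RiggingProcessor` class name and constructor are assumptions; only the methods shown in this diff are confirmed):

```python
import trimesh

processor = RiggingProcessor(device="cpu")        # hypothetical constructor
mesh = trimesh.load("monster.glb", force="mesh")  # placeholder asset path
analysis = processor._analyze_mesh(mesh)
skeleton = processor._generate_skeleton(mesh, analysis)
weights = processor._compute_bone_weights(mesh, skeleton)
animations = processor._create_default_animations(skeleton)
```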
models/stt_processor.py ADDED
@@ -0,0 +1,154 @@
1
+ import torch
+ from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
+ import numpy as np
+ from typing import Union
+ import librosa
9
+
10
+ class KyutaiSTTProcessor:
11
+ """Processor for Kyutai Speech-to-Text model"""
12
+
13
+ def __init__(self, device: str = "cuda"):
14
+ self.device = device if torch.cuda.is_available() else "cpu"
15
+ self.model = None
16
+ self.processor = None
17
+ self.model_id = "kyutai/stt-2.6b-en" # English-only model for better accuracy
18
+
19
+ # Audio processing parameters
20
+ self.sample_rate = 16000
21
+ self.chunk_length_s = 30 # Process in 30-second chunks
22
+ self.max_duration = 120 # Maximum 2 minutes of audio
23
+
24
+ def load_model(self):
25
+ """Lazy load the STT model"""
26
+ if self.model is None:
27
+ try:
28
+ # Load processor and model
29
+ self.processor = AutoProcessor.from_pretrained(self.model_id)
30
+
31
+ # Model configuration for low VRAM usage
32
+ torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
33
+
34
+ self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
35
+ self.model_id,
36
+ torch_dtype=torch_dtype,
37
+ low_cpu_mem_usage=True,
38
+ use_safetensors=True
39
+ )
40
+
41
+ self.model.to(self.device)
42
+
43
+ # Enable better generation settings
44
+ self.model.generation_config.language = "english"
45
+ self.model.generation_config.task = "transcribe"
46
+ self.model.generation_config.forced_decoder_ids = None
47
+
48
+ except Exception as e:
49
+ print(f"Failed to load STT model: {e}")
50
+ raise
51
+
52
+ def preprocess_audio(self, audio_path: str) -> np.ndarray:
53
+ """Preprocess audio file for transcription"""
54
+ try:
55
+ # Load audio file
56
+ audio, sr = librosa.load(audio_path, sr=None, mono=True)
57
+
58
+ # Resample if necessary
59
+ if sr != self.sample_rate:
60
+ audio = librosa.resample(audio, orig_sr=sr, target_sr=self.sample_rate)
61
+
62
+ # Limit duration
63
+ max_samples = self.max_duration * self.sample_rate
64
+ if len(audio) > max_samples:
65
+ audio = audio[:max_samples]
66
+
67
+ # Normalize audio
68
+ audio = audio / np.max(np.abs(audio) + 1e-7)
69
+
70
+ return audio
71
+
72
+ except Exception as e:
73
+ print(f"Error preprocessing audio: {e}")
74
+ raise
75
+
76
+ def transcribe(self, audio_input: Union[str, np.ndarray]) -> str:
77
+ """Transcribe audio to text"""
78
+ try:
79
+ # Load model if not already loaded
80
+ self.load_model()
81
+
82
+ # Process audio input
83
+ if isinstance(audio_input, str):
84
+ audio = self.preprocess_audio(audio_input)
85
+ else:
86
+ audio = audio_input
87
+
88
+ # Process with model
89
+ inputs = self.processor(
90
+ audio,
91
+ sampling_rate=self.sample_rate,
92
+ return_tensors="pt"
93
+ ).to(self.device)
94
+
95
+ # Generate transcription
96
+ with torch.no_grad():
97
+ generated_ids = self.model.generate(
98
+ inputs["input_features"],
99
+ max_new_tokens=128,
100
+ do_sample=False,
101
+ num_beams=1 # Greedy decoding for speed
102
+ )
103
+
104
+ # Decode transcription
105
+ transcription = self.processor.batch_decode(
106
+ generated_ids,
107
+ skip_special_tokens=True,
108
+ clean_up_tokenization_spaces=True
109
+ )[0]
110
+
111
+ # Clean up transcription
112
+ transcription = self._clean_transcription(transcription)
113
+
114
+ return transcription
115
+
116
+ except Exception as e:
117
+ print(f"Transcription error: {e}")
118
+ # Return a default description on error
119
+ return "Create a unique digital monster companion"
120
+
121
+ def _clean_transcription(self, text: str) -> str:
122
+ """Clean up transcription output"""
123
+ # Remove extra whitespace
124
+ text = " ".join(text.split())
125
+
126
+ # Ensure proper capitalization
127
+ if text and text[0].islower():
128
+ text = text[0].upper() + text[1:]
129
+
130
+ # Add period if missing
131
+ if text and not text[-1] in '.!?':
132
+ text += '.'
133
+
134
+ return text
135
+
136
+ def transcribe_streaming(self, audio_stream):
137
+ """Streaming transcription (for future implementation)"""
138
+ # This would handle real-time audio streams
139
+ # For now, return placeholder
140
+ raise NotImplementedError("Streaming transcription not yet implemented")
141
+
142
+ def to(self, device: str):
143
+ """Move model to specified device"""
144
+ self.device = device
145
+ if self.model:
146
+ self.model.to(device)
147
+
148
+ def __del__(self):
149
+ """Cleanup when object is destroyed"""
150
+ if self.model:
151
+ del self.model
152
+ if self.processor:
153
+ del self.processor
154
+ torch.cuda.empty_cache()
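A minimal usage sketch of the processor above; `sample.wav` is a placeholder path, and the model is downloaded lazily on the first call:

```python
stt = KyutaiSTTProcessor(device="cuda")
text = stt.transcribe("sample.wav")  # falls back to a default prompt on error
print(text)
```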
models/text_generator.py ADDED
@@ -0,0 +1,299 @@
1
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import random
+ from typing import Dict, Any, List
6
+
7
+ class QwenTextGenerator:
8
+ """Text generation using Qwen2.5-0.5B-Instruct for monster traits and dialogue"""
9
+
10
+ def __init__(self, device: str = "cuda"):
11
+ self.device = device if torch.cuda.is_available() else "cpu"
12
+ self.model = None
13
+ self.tokenizer = None
14
+ self.model_id = "Qwen/Qwen2.5-0.5B-Instruct"
15
+
16
+ # Generation parameters
17
+ self.max_new_tokens = 150
18
+ self.temperature = 0.8
19
+ self.top_p = 0.9
20
+
21
+ # Monster trait templates
22
+ self.trait_categories = {
23
+ 'elements': ['fire', 'water', 'earth', 'wind', 'electric', 'ice', 'nature', 'dark', 'light', 'neutral'],
24
+ 'personalities': ['brave', 'timid', 'aggressive', 'gentle', 'playful', 'serious', 'loyal', 'independent', 'curious', 'protective'],
25
+ 'body_types': ['bipedal', 'quadruped', 'serpentine', 'avian', 'aquatic', 'insectoid', 'humanoid', 'amorphous'],
26
+ 'sizes': ['tiny', 'small', 'medium', 'large', 'giant'],
27
+ 'special_features': ['wings', 'horns', 'tail', 'spikes', 'fur', 'scales', 'armor', 'crystals', 'flames', 'aura']
28
+ }
29
+
30
+ def load_model(self):
31
+ """Lazy load the text generation model"""
32
+ if self.model is None:
33
+ try:
34
+ # Load tokenizer
35
+ self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
36
+
37
+ # Model configuration
38
+ torch_dtype = torch.float16 if self.device == "cuda" else torch.float32
39
+
40
+ self.model = AutoModelForCausalLM.from_pretrained(
41
+ self.model_id,
42
+ torch_dtype=torch_dtype,
43
+ device_map="auto" if self.device == "cuda" else None,
44
+ low_cpu_mem_usage=True
45
+ )
46
+
47
+ if self.device == "cpu":
48
+ self.model.to(self.device)
49
+
50
+ except Exception as e:
51
+ print(f"Failed to load text generation model: {e}")
52
+ raise
53
+
54
+ def generate_traits(self, description: str) -> Dict[str, Any]:
55
+ """Generate monster traits from description"""
56
+ try:
57
+ self.load_model()
58
+
59
+ # Create prompt for trait generation
60
+ prompt = self._create_trait_prompt(description)
61
+
62
+ # Generate response
63
+ inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
64
+
65
+ with torch.no_grad():
66
+ outputs = self.model.generate(
67
+ **inputs,
68
+ max_new_tokens=self.max_new_tokens,
69
+ temperature=self.temperature,
70
+ top_p=self.top_p,
71
+ do_sample=True,
72
+ pad_token_id=self.tokenizer.eos_token_id
73
+ )
74
+
75
+ response = self.tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True)
76
+
77
+ # Parse traits from response
78
+ traits = self._parse_traits(response, description)
79
+
80
+ return traits
81
+
82
+ except Exception as e:
83
+ print(f"Error generating traits: {e}")
84
+ return self._generate_fallback_traits(description)
85
+
86
+ def generate_dialogue(self, traits: Dict[str, Any]) -> str:
87
+ """Generate monster dialogue (emoji + numbers)"""
88
+ try:
89
+ # Create emoji dialogue based on personality and mood
90
+ personality = traits.get('personality', 'neutral')
91
+
92
+ # Emoji mapping for personalities
93
+ emoji_map = {
94
+ 'brave': ['💪', '🔥', '⚔️', '🛡️'],
95
+ 'timid': ['😰', '🥺', '💦', '❓'],
96
+ 'aggressive': ['😤', '💢', '🔥', '⚡'],
97
+ 'gentle': ['💚', '🌸', '✨', '🌟'],
98
+ 'playful': ['😊', '🎮', '🎯', '🎪'],
99
+ 'serious': ['🤖', '📊', '⚡', '💯'],
100
+ 'loyal': ['💖', '🤝', '🛡️', '⭐'],
101
+ 'independent': ['🚀', '🌍', '🔮', '💫'],
102
+ 'curious': ['🔍', '❓', '💡', '🌟'],
103
+ 'protective': ['🛡️', '💪', '🏰', '⚔️']
104
+ }
105
+
106
+ # Get appropriate emojis
107
+ emojis = emoji_map.get(personality, ['🤖', '💚', '✨'])
108
+ selected_emojis = random.sample(emojis, min(2, len(emojis)))
109
+
110
+ # Generate status numbers (representing monster's current state)
111
+ hp_percent = random.randint(70, 100)
112
+ happiness = random.randint(60, 95)
113
+ energy = random.randint(50, 90)
114
+
115
+ # Create dialogue
116
+ dialogue = f"{selected_emojis[0]}{selected_emojis[1] if len(selected_emojis) > 1 else '💚'}"
117
+ dialogue += f"{hp_percent}️⃣{happiness}️⃣"
118
+
119
+ return dialogue
120
+
121
+ except Exception as e:
122
+ print(f"Error generating dialogue: {e}")
123
+ return "🤖💚9️⃣0️⃣"
124
+
125
+ def _create_trait_prompt(self, description: str) -> str:
126
+ """Create prompt for trait generation"""
127
+ prompt = f"""<|im_start|>system
128
+ You are a creative game designer creating unique digital monsters. Generate detailed traits for a monster based on the description.
129
+ <|im_end|>
130
+ <|im_start|>user
131
+ Create traits for this monster: {description}
132
+
133
+ Include: name, species, element, personality, appearance details, and special abilities.
134
+ <|im_end|>
135
+ <|im_start|>assistant
136
+ """
137
+ return prompt
138
+
139
+ def _parse_traits(self, response: str, original_description: str) -> Dict[str, Any]:
140
+ """Parse traits from model response"""
141
+ traits = {
142
+ 'description': original_description,
143
+ 'raw_response': response
144
+ }
145
+
146
+ # Extract name
147
+ if "name:" in response.lower():
148
+ name_start = response.lower().find("name:") + 5
149
+ name_end = response.find("\n", name_start)
150
+ if name_end == -1:
151
+ name_end = len(response)
152
+ traits['name'] = response[name_start:name_end].strip()
153
+ else:
154
+ traits['name'] = self._generate_name()
155
+
156
+ # Extract or assign element
157
+ element_found = False
158
+ for element in self.trait_categories['elements']:
159
+ if element in response.lower():
160
+ traits['element'] = element
161
+ element_found = True
162
+ break
163
+
164
+ if not element_found:
165
+ traits['element'] = random.choice(self.trait_categories['elements'])
166
+
167
+ # Extract or assign personality
168
+ personality_found = False
169
+ for personality in self.trait_categories['personalities']:
170
+ if personality in response.lower():
171
+ traits['personality'] = personality
172
+ personality_found = True
173
+ break
174
+
175
+ if not personality_found:
176
+ traits['personality'] = random.choice(self.trait_categories['personalities'])
177
+
178
+ # Extract appearance
179
+ traits['appearance'] = self._extract_appearance(response)
180
+
181
+ # Extract abilities
182
+ traits['abilities'] = self._extract_abilities(response, traits['element'])
183
+
184
+ # Add color scheme based on element
185
+ traits['color_scheme'] = self._get_color_scheme(traits['element'])
186
+
187
+ return traits
188
+
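For reference, the dict `_parse_traits` returns has a stable shape even when the model output is unparseable, because every field has a fallback; illustrative values only:

```python
example_traits = {
    'name': 'Voltfang',                     # parsed, or random prefix+suffix
    'element': 'electric',                  # matched keyword, or random pick
    'personality': 'curious',
    'appearance': 'A small quadruped creature with crystals',
    'abilities': ['Thunder Shock', 'Static Field'],
    'color_scheme': 'yellow and blue with sparking effects',
    'description': '<original user description>',
    'raw_response': '<raw model output>',
}
```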
189
+ def _generate_name(self) -> str:
190
+ """Generate a random monster name"""
191
+ prefixes = ['Pyro', 'Aqua', 'Terra', 'Aero', 'Volt', 'Cryo', 'Flora', 'Shadow', 'Lumi', 'Neo']
192
+ suffixes = ['mon', 'beast', 'guard', 'wing', 'claw', 'fang', 'horn', 'tail', 'byte', 'spark']
193
+
194
+ return random.choice(prefixes) + random.choice(suffixes)
195
+
196
+ def _extract_appearance(self, response: str) -> str:
197
+ """Extract appearance description"""
198
+ appearance_keywords = ['appearance', 'looks like', 'resembles', 'body', 'color', 'size']
199
+
200
+ for keyword in appearance_keywords:
201
+ if keyword in response.lower():
202
+ start = response.lower().find(keyword)
203
+ end = response.find('.', start)
204
+ if end == -1:
205
+ end = response.find('\n', start)
206
+ if end == -1:
207
+ end = len(response)
208
+
209
+ return response[start:end].strip()
210
+
211
+ # Fallback appearance
212
+ body_type = random.choice(self.trait_categories['body_types'])
213
+ size = random.choice(self.trait_categories['sizes'])
214
+ feature = random.choice(self.trait_categories['special_features'])
215
+
216
+ return f"A {size} {body_type} creature with {feature}"
217
+
218
+ def _extract_abilities(self, response: str, element: str) -> List[str]:
219
+ """Extract or generate abilities"""
220
+ abilities = []
221
+
222
+ ability_keywords = ['ability', 'power', 'skill', 'can', 'capable']
223
+ for keyword in ability_keywords:
224
+ if keyword in response.lower():
225
+ # Try to extract abilities from response
226
+ start = response.lower().find(keyword)
227
+ end = response.find('.', start)
228
+ if end > start:
229
+ ability_text = response[start:end]
230
+ abilities.append(ability_text.strip())
231
+
232
+ # If no abilities found, generate based on element
233
+ if not abilities:
234
+ element_abilities = {
235
+ 'fire': ['Flame Burst', 'Heat Wave', 'Ember Shield'],
236
+ 'water': ['Aqua Jet', 'Bubble Shield', 'Tidal Wave'],
237
+ 'earth': ['Rock Throw', 'Earthquake', 'Stone Armor'],
238
+ 'wind': ['Gust', 'Tornado', 'Wind Shield'],
239
+ 'electric': ['Thunder Shock', 'Static Field', 'Lightning Speed'],
240
+ 'ice': ['Ice Beam', 'Frost Armor', 'Blizzard'],
241
+ 'nature': ['Vine Whip', 'Healing Bloom', 'Nature\'s Guard'],
242
+ 'dark': ['Shadow Strike', 'Dark Pulse', 'Void Shield'],
243
+ 'light': ['Light Beam', 'Healing Light', 'Radiant Shield'],
244
+ 'neutral': ['Tackle', 'Defense Curl', 'Focus']
245
+ }
246
+
247
+ abilities = random.sample(
248
+ element_abilities.get(element, element_abilities['neutral']),
249
+ 2
250
+ )
251
+
252
+ return abilities
253
+
254
+ def _get_color_scheme(self, element: str) -> str:
255
+ """Get color scheme based on element"""
256
+ color_schemes = {
257
+ 'fire': 'red and orange with yellow accents',
258
+ 'water': 'blue and cyan with white highlights',
259
+ 'earth': 'brown and green with stone textures',
260
+ 'wind': 'white and light blue with swirling patterns',
261
+ 'electric': 'yellow and blue with sparking effects',
262
+ 'ice': 'light blue and white with crystalline features',
263
+ 'nature': 'green and brown with leaf patterns',
264
+ 'dark': 'black and purple with shadow effects',
265
+ 'light': 'white and gold with glowing aura',
266
+ 'neutral': 'gray and silver with balanced tones'
267
+ }
268
+
269
+ return color_schemes.get(element, 'varied colors with unique patterns')
270
+
271
+ def _generate_fallback_traits(self, description: str) -> Dict[str, Any]:
272
+ """Generate fallback traits if model fails"""
273
+ element = random.choice(self.trait_categories['elements'])
274
+ personality = random.choice(self.trait_categories['personalities'])
275
+
276
+ return {
277
+ 'name': self._generate_name(),
278
+ 'species': 'Digital Monster',
279
+ 'element': element,
280
+ 'personality': personality,
281
+ 'appearance': f"A unique {random.choice(self.trait_categories['sizes'])} digital creature",
282
+ 'color_scheme': self._get_color_scheme(element),
283
+ 'abilities': self._extract_abilities("", element),
284
+ 'description': description
285
+ }
286
+
287
+ def to(self, device: str):
288
+ """Move model to specified device"""
289
+ self.device = device
290
+ if self.model:
291
+ self.model.to(device)
292
+
293
+ def __del__(self):
294
+ """Cleanup when object is destroyed"""
295
+ if self.model:
296
+ del self.model
297
+ if self.tokenizer:
298
+ del self.tokenizer
299
+ torch.cuda.empty_cache()
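End-to-end usage of the generator above (output values vary from run to run):

```python
gen = QwenTextGenerator(device="cpu")
traits = gen.generate_traits("a small electric fox with crystal horns")
print(traits['name'], traits['element'], traits['abilities'])
print(gen.generate_dialogue(traits))  # e.g. a '🔍❓8️⃣5️⃣'-style status string
```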
requirements.txt CHANGED
@@ -1,57 +1,48 @@
- # Core ML Framework - Latest optimized versions
- transformers>=4.52.4  # Latest stable, supports Qwen 2.5
- torch>=2.2.0  # PyTorch 2.0+ for torch.compile
- torchaudio>=2.2.0
- diffusers>=0.30.0  # For OmniGen and other diffusion models
- # gradio>=5.34.2  # Replaced with Streamlit
-
- # Qwen 2.5 Optimization Stack
- # auto-gptq>=0.7.1  # Removed - not needed, using BitsAndBytesConfig instead
- optimum>=1.16.0
- accelerate>=0.26.1
- bitsandbytes>=0.42.0
- # FlashAttention2 will be installed at runtime if GPU is available
-
- # Enhanced Audio Processing - Kyutai STT
  soundfile>=0.12.1
- webrtcvad>=2.0.10
- # Note: transformers and torch/torchaudio above provide Kyutai STT support
-
- # Production Backend
- fastapi>=0.108.0
- uvicorn[standard]>=0.25.0
- pydantic>=2.5.0
- websockets>=12.0
- streamlit>=1.28.0  # Modern UI framework replacing Gradio
-
- # Advanced State Management
- apscheduler>=3.10.4
- aiosqlite>=0.19.0
-
- # Zero GPU Optimization (kept for speech engine compatibility)
- spaces>=0.28.0
-
- # 3D Generation Pipeline Dependencies
- gradio_client>=0.8.0  # For Hunyuan3D Space API integration
- trimesh>=4.0.0  # 3D mesh processing
- aiohttp>=3.9.0  # Async HTTP for API calls
-
- # Core Utilities
  numpy>=1.24.0
- pandas>=2.1.0
- pillow>=10.1.0
- python-dateutil>=2.8.2
- emoji>=2.8.0
- psutil>=5.9.0
-
- # Async Support
- aiofiles>=23.2.0
- asyncio-mqtt>=0.16.1
-
- # Scientific Computing
  scipy>=1.11.0
  scikit-learn>=1.3.0

- # Development Tools
- pytest>=7.4.0
- black>=23.0.0

+ # Core dependencies
+ gradio>=4.16.0
+ spaces>=0.19.0
+
+ # AI/ML frameworks
+ torch>=2.1.0
+ torchvision>=0.16.0
+ torchaudio>=2.1.0
+ transformers>=4.36.0
+ diffusers>=0.24.0
+ accelerate>=0.25.0
+ bitsandbytes>=0.41.0
+
+ # Model-specific dependencies
+ huggingface-hub>=0.20.0
+ safetensors>=0.4.1
+ sentencepiece>=0.1.99
+ tokenizers>=0.15.0
+
+ # Audio processing
+ librosa>=0.10.1
  soundfile>=0.12.1
+
+ # Image processing
+ Pillow>=10.0.0
+ opencv-python>=4.8.0
+ rembg>=2.0.50
+
+ # 3D processing
+ trimesh>=4.0.0
+ numpy-stl>=3.1.1
+ pygltflib>=1.16.1
+
+ # Scientific computing
  numpy>=1.24.0
  scipy>=1.11.0
  scikit-learn>=1.3.0
+
+ # Utilities
+ python-dateutil>=2.8.2
+ tqdm>=4.66.0
+ pyyaml>=6.0.1
+ requests>=2.31.0
+ aiofiles>=23.2.1
+
+ # Optional optimizations
+ # onnxruntime-gpu>=1.16.0  # For ONNX model support
+ # xformers>=0.0.23  # For memory-efficient attention
run_digipal.py DELETED
@@ -1,80 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- DigiPal Launcher Script
4
- Starts both FastAPI backend and Streamlit frontend
5
- """
6
-
7
- import subprocess
8
- import time
9
- import sys
10
- import os
11
- import threading
12
- import logging
13
-
14
- # Configure logging
15
- logging.basicConfig(
16
- level=logging.INFO,
17
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
18
- )
19
- logger = logging.getLogger(__name__)
20
-
21
- def start_fastapi():
22
- """Start FastAPI backend server"""
23
- logger.info("Starting FastAPI backend server...")
24
- try:
25
- subprocess.run([sys.executable, "app.py"], check=True)
26
- except subprocess.CalledProcessError as e:
27
- logger.error(f"FastAPI server failed: {e}")
28
- except KeyboardInterrupt:
29
- logger.info("FastAPI server stopped")
30
-
31
- def start_streamlit():
32
- """Start Streamlit frontend"""
33
- logger.info("Starting Streamlit frontend...")
34
- try:
35
- port = os.getenv("STREAMLIT_PORT", "8501")
36
- subprocess.run([
37
- sys.executable, "-m", "streamlit", "run",
38
- "src/ui/streamlit_interface.py",
39
- "--server.port", port,
40
- "--server.address", "0.0.0.0"
41
- ], check=True)
42
- except subprocess.CalledProcessError as e:
43
- logger.error(f"Streamlit frontend failed: {e}")
44
- except KeyboardInterrupt:
45
- logger.info("Streamlit frontend stopped")
46
-
47
- def main():
48
- """Main launcher function"""
49
- logger.info("🐉 DigiPal - Advanced AI Monster Companion")
50
- logger.info("=" * 60)
51
- logger.info("Starting both FastAPI backend and Streamlit frontend...")
52
- api_port = os.getenv("API_PORT", "7861")
53
- streamlit_port = os.getenv("STREAMLIT_PORT", "8501")
54
- logger.info(f"FastAPI Backend: http://localhost:{api_port}")
55
- logger.info(f"Streamlit Frontend: http://localhost:{streamlit_port}")
56
- logger.info("=" * 60)
57
-
58
- # Create necessary directories
59
- os.makedirs("data/saves", exist_ok=True)
60
- os.makedirs("data/models", exist_ok=True)
61
- os.makedirs("data/cache", exist_ok=True)
62
- os.makedirs("logs", exist_ok=True)
63
-
64
- try:
65
- # Start FastAPI in a separate thread
66
- fastapi_thread = threading.Thread(target=start_fastapi, daemon=True)
67
- fastapi_thread.start()
68
-
69
- # Give FastAPI time to start
70
- time.sleep(3)
71
-
72
- # Start Streamlit (this will block)
73
- start_streamlit()
74
-
75
- except KeyboardInterrupt:
76
- logger.info("Shutting down DigiPal...")
77
- sys.exit(0)
78
-
79
- if __name__ == "__main__":
80
- main()
src/ai/__init__.py DELETED
@@ -1 +0,0 @@
1
- # AI module initialization
 
 
src/ai/qwen_processor.py DELETED
@@ -1,621 +0,0 @@
1
- import torch
2
- from transformers import (
3
- AutoModelForCausalLM,
4
- AutoTokenizer,
5
- pipeline,
6
- BitsAndBytesConfig
7
- )
8
- # GPTQConfig is no longer needed - we'll use BitsAndBytesConfig for quantization
9
- import asyncio
10
- import logging
11
- from typing import Dict, List, Optional, Any
12
- import json
13
- import time
14
- from dataclasses import dataclass
15
- import spaces
16
- from datetime import datetime
17
-
18
- # Check for optional FlashAttention2 availability
19
- try:
20
- import flash_attn
21
- FLASH_ATTN_AVAILABLE = True
22
- except ImportError:
23
- FLASH_ATTN_AVAILABLE = False
24
-
25
- @dataclass
26
- class ModelConfig:
27
- model_name: str
28
- max_memory_gb: float
29
- inference_speed: str # "fast", "balanced", "quality"
30
- use_quantization: bool = True
31
- use_flash_attention: bool = True
32
-
33
- class QwenProcessor:
34
- def __init__(self, config: ModelConfig):
35
- self.config = config
36
- self.logger = logging.getLogger(__name__)
37
-
38
- # Model configurations for different performance tiers
39
- self.model_configs = {
40
- "fast": {
41
- "model_name": "Qwen/Qwen2.5-1.5B-Instruct",
42
- "torch_dtype": torch.bfloat16,
43
- "device_map": "auto",
44
- "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
45
- "max_memory_gb": 4
46
- },
47
- "balanced": {
48
- "model_name": "Qwen/Qwen2.5-3B-Instruct",
49
- "torch_dtype": torch.bfloat16,
50
- "device_map": "auto",
51
- "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
52
- "max_memory_gb": 8
53
- },
54
- "quality": {
55
- "model_name": "Qwen/Qwen2.5-7B-Instruct",
56
- "torch_dtype": torch.bfloat16,
57
- "device_map": "auto",
58
- "attn_implementation": "flash_attention_2" if FLASH_ATTN_AVAILABLE else None,
59
- "max_memory_gb": 16
60
- }
61
- }
62
-
63
- self.model = None
64
- self.tokenizer = None
65
- self.pipeline = None
66
- self.conversation_cache = {}
67
-
68
- # Performance tracking
69
- self.inference_times = []
70
- self.memory_usage = []
71
-
72
- def _try_install_flash_attention(self) -> bool:
73
- """Try to install FlashAttention2 at runtime if GPU is available"""
74
- global FLASH_ATTN_AVAILABLE
75
-
76
- # If already available, no need to install
77
- if FLASH_ATTN_AVAILABLE:
78
- return True
79
-
80
- # Only attempt installation if GPU is available
81
- if not torch.cuda.is_available():
82
- self.logger.info("GPU not available, skipping FlashAttention2 installation")
83
- return False
84
-
85
- try:
86
- self.logger.info("Attempting to install FlashAttention2 at runtime...")
87
-
88
- # Try simple pip install first
89
- import subprocess
90
- import sys
91
-
92
- # Try installing with different approaches
93
- commands = [
94
- # Try with pre-compiled wheel
95
- [sys.executable, "-m", "pip", "install", "flash-attn", "--prefer-binary", "--no-build-isolation"],
96
- # Fallback to regular installation
97
- [sys.executable, "-m", "pip", "install", "flash-attn", "--no-deps"],
98
- ]
99
-
100
- for cmd in commands:
101
- try:
102
- result = subprocess.run(cmd, capture_output=True, text=True, timeout=180)
103
- if result.returncode == 0:
104
- # Try importing to verify installation
105
- try:
106
- import flash_attn
107
- FLASH_ATTN_AVAILABLE = True
108
- self.logger.info("FlashAttention2 installed successfully!")
109
- return True
110
- except ImportError:
111
- continue
112
- else:
113
- self.logger.debug(f"Installation attempt failed: {result.stderr}")
114
- continue
115
- except subprocess.TimeoutExpired:
116
- self.logger.warning("FlashAttention2 installation timed out")
117
- continue
118
- except Exception as e:
119
- self.logger.debug(f"Installation attempt error: {e}")
120
- continue
121
-
122
- self.logger.info("FlashAttention2 installation failed, will use default attention mechanism")
123
-
124
- except Exception as e:
125
- self.logger.warning(f"Could not attempt FlashAttention2 installation: {e}")
126
-
127
- return False
128
-
129
- async def initialize(self):
130
- """Initialize the Qwen 2.5 model with optimizations"""
131
- try:
132
- model_config = self.model_configs[self.config.inference_speed]
133
-
134
- # Enhanced device detection for local vs Spaces environments
135
- is_cpu_only = not torch.cuda.is_available()
136
- is_spaces_gpu = False
137
-
138
- # Check if we're in Spaces with GPU
139
- if torch.cuda.is_available():
140
- try:
141
- # Test if we can actually use GPU
142
- torch.cuda.current_device()
143
- torch.cuda.empty_cache()
144
- is_spaces_gpu = True
145
- is_cpu_only = False
146
- self.logger.info("GPU detected and accessible - using GPU acceleration")
147
-
148
- # Try to install FlashAttention2 at runtime if not available
149
- if not FLASH_ATTN_AVAILABLE:
150
- self._try_install_flash_attention()
151
-
152
- except Exception as e:
153
- self.logger.warning(f"GPU detected but not accessible: {e} - falling back to CPU")
154
- is_cpu_only = True
155
- is_spaces_gpu = False
156
- else:
157
- self.logger.info("No GPU detected - using CPU only")
158
-
159
- # Quantization configuration - optimize based on environment
160
- if self.config.use_quantization and not is_cpu_only:
161
- try:
162
- quantization_config = BitsAndBytesConfig(
163
- load_in_4bit=True,
164
- bnb_4bit_compute_dtype=torch.bfloat16,
165
- bnb_4bit_use_double_quant=True,
166
- bnb_4bit_quant_type="nf4"
167
- )
168
- self.logger.info("4-bit quantization enabled for GPU")
169
- except Exception as e:
170
- self.logger.warning(f"Quantization failed, falling back to full precision: {e}")
171
- quantization_config = None
172
- else:
173
- quantization_config = None
174
- if is_cpu_only:
175
- self.logger.info("CPU-only environment detected, disabling quantization")
176
-
177
- # Load tokenizer
178
- self.tokenizer = AutoTokenizer.from_pretrained(
179
- model_config["model_name"],
180
- trust_remote_code=True,
181
- use_fast=True
182
- )
183
-
184
- # Adjust model configuration based on environment
185
- model_kwargs = {
186
- "trust_remote_code": True,
187
- "use_cache": True,
188
- "low_cpu_mem_usage": True
189
- }
190
-
191
- if is_cpu_only:
192
- # CPU-only optimizations
193
- model_kwargs.update({
194
- "torch_dtype": torch.float32, # Use float32 for CPU compatibility
195
- "device_map": "cpu"
196
- })
197
- self.logger.info("Loading model for CPU-only environment")
198
- else:
199
- # GPU optimizations - use FlashAttention2 when available
200
- use_flash_attention = (self.config.use_flash_attention and
201
- is_spaces_gpu and
202
- FLASH_ATTN_AVAILABLE)
203
-
204
- if use_flash_attention:
205
- attn_implementation = model_config["attn_implementation"]
206
- self.logger.info("Loading model for GPU environment with FlashAttention2")
207
- else:
208
- attn_implementation = None # Use default attention
209
- if self.config.use_flash_attention and not FLASH_ATTN_AVAILABLE:
210
- self.logger.info("FlashAttention2 requested but not available, using default attention")
211
- else:
212
- self.logger.info("Loading model for GPU environment with default attention")
213
-
214
- model_kwargs.update({
215
- "torch_dtype": model_config["torch_dtype"],
216
- "device_map": model_config["device_map"],
217
- "attn_implementation": attn_implementation
218
- })
219
-
220
- if quantization_config is not None:
221
- model_kwargs["quantization_config"] = quantization_config
222
-
223
- # Load model with optimizations
224
- import os
225
- if os.getenv("SPACE_ID") and not is_cpu_only:
226
- # Use GPU wrapper for ZeroGPU compatibility
227
- self.model = gpu_model_initialization(
228
- AutoModelForCausalLM,
229
- model_config["model_name"],
230
- **model_kwargs
231
- )
232
- else:
233
- # Direct model loading for local environments
234
- self.model = AutoModelForCausalLM.from_pretrained(
235
- model_config["model_name"],
236
- **model_kwargs
237
- )
238
-
239
- # Compile model for faster inference (PyTorch 2.0+) - only on GPU
240
- if hasattr(torch, "compile") and not is_cpu_only:
241
- try:
242
- self.model = torch.compile(self.model, mode="reduce-overhead")
243
- self.logger.info("Model compiled with torch.compile for faster inference")
244
- except Exception as e:
245
- self.logger.warning(f"Model compilation failed: {e}")
246
-
247
- # Create pipeline with appropriate device mapping
248
- pipeline_kwargs = {
249
- "task": "text-generation",
250
- "model": self.model,
251
- "tokenizer": self.tokenizer,
252
- "batch_size": 1,
253
- "return_full_text": False
254
- }
255
-
256
- if is_cpu_only:
257
- pipeline_kwargs["device"] = -1 # CPU device for pipeline
258
- # Do not pass device_map when model is already loaded with accelerate
259
-
260
- self.pipeline = pipeline(**pipeline_kwargs)
261
-
262
- self.logger.info(f"Qwen 2.5 model initialized: {model_config['model_name']} ({'GPU' if not is_cpu_only else 'CPU'})")
263
-
264
- except Exception as e:
265
- self.logger.error(f"Failed to initialize Qwen model: {e}")
266
- raise
267
-
268
- async def generate_monster_response(self,
269
- monster_data: Dict[str, Any],
270
- user_input: str,
271
- conversation_history: List[Dict[str, str]] = None) -> Dict[str, Any]:
272
- """Generate contextual response based on monster personality and state"""
273
- start_time = time.time()
274
-
275
- if conversation_history is None:
276
- conversation_history = []
277
-
278
- try:
279
- # Build context from monster data
280
- context = self._build_context(monster_data, conversation_history)
281
-
282
- # Generate appropriate prompt based on monster state
283
- prompt = self._generate_prompt(context, user_input)
284
-
285
- # Configure generation parameters based on monster personality
286
- generation_params = self._get_generation_params(monster_data)
287
-
288
- # Generate response using the pipeline
289
- response = await self._generate_response(prompt, generation_params)
290
-
291
- # Post-process response based on monster personality
292
- processed_response = self._post_process_response(
293
- response,
294
- monster_data
295
- )
296
-
297
- # Calculate emotional impact
298
- emotional_impact = self._calculate_emotional_impact(
299
- user_input,
300
- processed_response,
301
- monster_data
302
- )
303
-
304
- # Track memory usage
305
- if torch.cuda.is_available():
306
- memory_used = torch.cuda.max_memory_allocated() / 1024**3
307
- self.memory_usage.append({
308
- "timestamp": datetime.now().isoformat(),
309
- "memory_gb": memory_used
310
- })
311
-
312
- inference_time = time.time() - start_time
313
-
314
- return {
315
- "response": processed_response,
316
- "emotional_impact": emotional_impact,
317
- "inference_time": inference_time,
318
- "model_used": self.config.model_name,
319
- "context_length": len(prompt)
320
- }
321
-
322
- except Exception as e:
323
- self.logger.error(f"Response generation failed: {e}")
324
- # Return a fallback response
325
- return {
326
- "response": self._get_fallback_response(monster_data),
327
- "emotional_impact": {"neutral": 1.0},
328
- "inference_time": 0.0,
329
- "error": str(e)
330
- }
331
-
332
- def _build_personality_prompt(self, monster_data: Dict[str, Any]) -> str:
333
- """Build personality description for the monster"""
334
- personality = monster_data.get('personality', {})
335
-
336
- # Core personality traits
337
- primary_type = personality.get('primary_type', 'playful')
338
- traits = []
339
-
340
- # Big Five personality factors
341
- if personality.get('extraversion', 0.5) > 0.7:
342
- traits.append("very outgoing and social")
343
- elif personality.get('extraversion', 0.5) < 0.3:
344
- traits.append("more reserved and introspective")
345
-
346
- if personality.get('agreeableness', 0.5) > 0.7:
347
- traits.append("extremely friendly and cooperative")
348
- elif personality.get('agreeableness', 0.5) < 0.3:
349
- traits.append("more independent and sometimes stubborn")
350
-
351
- if personality.get('conscientiousness', 0.5) > 0.7:
352
- traits.append("very disciplined and organized")
353
- elif personality.get('conscientiousness', 0.5) < 0.3:
354
- traits.append("more spontaneous and carefree")
355
-
356
- if personality.get('openness', 0.5) > 0.7:
357
- traits.append("very curious and imaginative")
358
- elif personality.get('openness', 0.5) < 0.3:
359
- traits.append("more practical and traditional")
360
-
361
- # Learned preferences
362
- favorites = personality.get('favorite_foods', [])
363
- dislikes = personality.get('disliked_foods', [])
364
-
365
- personality_text = f"Personality Type: {primary_type.title()}\n"
366
-
367
- if traits:
368
- personality_text += f"You are {', '.join(traits)}.\n"
369
-
370
- if favorites:
371
- personality_text += f"Your favorite foods are: {', '.join(favorites[:3])}.\n"
372
-
373
- if dislikes:
374
- personality_text += f"You dislike: {', '.join(dislikes[:3])}.\n"
375
-
376
- # Relationship context
377
- relationship_level = personality.get('relationship_level', 0)
378
- if relationship_level > 80:
379
- personality_text += "You have a very strong bond with your caretaker.\n"
380
- elif relationship_level > 50:
381
- personality_text += "You trust and like your caretaker.\n"
382
- elif relationship_level > 20:
383
- personality_text += "You're getting to know your caretaker.\n"
384
- else:
385
- personality_text += "You're still warming up to your caretaker.\n"
386
-
387
- return personality_text
388
-
389
- def _build_conversation_context(self,
390
- history: List[Dict[str, str]],
391
- monster_data: Dict[str, Any]) -> str:
392
- """Build conversation context from recent history"""
393
- if not history:
394
- return "This is your first conversation together."
395
-
396
- # Get recent messages (last 3 exchanges)
397
- recent_history = history[-6:] if len(history) > 6 else history
398
-
399
- context = "Recent conversation:\n"
400
- for i, msg in enumerate(recent_history):
401
- if msg.get('role') == 'user':
402
- context += f"Human: {msg.get('content', '')}\n"
403
- else:
404
- context += f"You: {msg.get('content', '')}\n"
405
-
406
- return context
407
-
408
- def _post_process_response(self, response: str, monster_data: Dict[str, Any]) -> str:
409
- """Post-process the generated response"""
410
- # Remove any unwanted prefixes/suffixes
411
- response = response.strip()
412
-
413
- # Remove common artifacts
414
- unwanted_prefixes = ["Assistant:", "Monster:", "DigiPal:", monster_data['name'] + ":"]
415
- for prefix in unwanted_prefixes:
416
- if response.startswith(prefix):
417
- response = response[len(prefix):].strip()
418
-
419
- # Ensure appropriate length
420
- sentences = response.split('.')
421
- if len(sentences) > 2:
422
- response = '. '.join(sentences[:2]) + '.'
423
-
424
- # Add emojis if missing
425
- if not self._has_emoji(response):
426
- response = self._add_contextual_emoji(response, monster_data)
427
-
428
- return response
429
-
430
- def _has_emoji(self, text: str) -> bool:
431
- """Check if text contains emojis"""
432
- import emoji
433
- return bool(emoji.emoji_count(text))
434
-
435
- def _add_contextual_emoji(self, response: str, monster_data: Dict[str, Any]) -> str:
436
- """Add appropriate emoji based on context"""
437
- emotional_state = monster_data.get('emotional_state', 'neutral')
438
-
439
- emoji_map = {
440
- 'ecstatic': ' 🤩',
441
- 'happy': ' 😊',
442
- 'content': ' 😌',
443
- 'neutral': ' 🙂',
444
- 'melancholy': ' 😔',
445
- 'sad': ' 😢',
446
- 'angry': ' 😠',
447
- 'sick': ' 🤒',
448
- 'excited': ' 😆',
449
- 'tired': ' 😴'
450
- }
451
-
452
- return response + emoji_map.get(emotional_state, ' 🙂')
453
-
454
- def _analyze_emotional_impact(self, user_input: str, response: str) -> Dict[str, float]:
455
- """Analyze the emotional impact of the interaction"""
456
- # Simple keyword-based analysis (can be enhanced with sentiment models)
457
- positive_keywords = ['love', 'good', 'great', 'amazing', 'wonderful', 'happy', 'fun']
458
- negative_keywords = ['bad', 'sad', 'angry', 'hate', 'terrible', 'awful', 'sick']
459
-
460
- user_input_lower = user_input.lower()
461
-
462
- impact = {
463
- 'happiness': 0.0,
464
- 'stress': 0.0,
465
- 'bonding': 0.0
466
- }
467
-
468
- # Analyze user input sentiment
469
- for keyword in positive_keywords:
470
- if keyword in user_input_lower:
471
- impact['happiness'] += 0.1
472
- impact['bonding'] += 0.05
473
-
474
- for keyword in negative_keywords:
475
- if keyword in user_input_lower:
476
- impact['happiness'] -= 0.1
477
- impact['stress'] += 0.1
478
-
479
- # Base interaction bonus
480
- impact['bonding'] += 0.02 # Small bonding increase for any interaction
481
-
482
- return impact
483
-
484
- def _get_fallback_response(self, monster_data: Dict[str, Any]) -> str:
485
- """Get fallback response when AI generation fails"""
486
- fallback_responses = [
487
- f"*{monster_data['name']} looks at you curiously* 🤔",
488
- f"*{monster_data['name']} makes a happy sound* 😊",
489
- f"*{monster_data['name']} tilts head thoughtfully* 💭",
490
- f"*{monster_data['name']} seems interested* 👀"
491
- ]
492
-
493
- import random
494
- return random.choice(fallback_responses)
495
-
496
- def get_performance_stats(self) -> Dict[str, Any]:
497
- """Get model performance statistics"""
498
- if not self.inference_times:
499
- return {"status": "No inference data available"}
500
-
501
- avg_time = sum(self.inference_times) / len(self.inference_times)
502
-
503
- return {
504
- "average_inference_time": avg_time,
505
- "total_inferences": len(self.inference_times),
506
- "fastest_inference": min(self.inference_times),
507
- "slowest_inference": max(self.inference_times),
508
- "tokens_per_second": 128 / avg_time, # Approximate
509
- "model_config": self.config.__dict__
510
- }
511
-
512
- def _build_context(self, monster_data: Dict[str, Any], conversation_history: List[Dict[str, str]]) -> str:
513
- """Build complete context from monster data and conversation history"""
514
- personality_prompt = self._build_personality_prompt(monster_data)
515
- conversation_context = self._build_conversation_context(conversation_history, monster_data)
516
-
517
- context = f"""You are {monster_data['name']}, a virtual monster companion.
518
-
519
- {personality_prompt}
520
-
521
- Current State:
522
- - Health: {monster_data['stats']['health']}/100
523
- - Happiness: {monster_data['stats']['happiness']}/100
524
- - Energy: {monster_data['stats']['energy']}/100
525
- - Emotional State: {monster_data['emotional_state']}
526
- - Activity: {monster_data['current_activity']}
527
-
528
- Instructions:
529
- - Respond as this specific monster with this personality
530
- - Keep responses to 1-2 sentences maximum
531
- - Include 1-2 relevant emojis
532
- - Show personality through word choice and tone
533
- - React appropriately to your current stats and emotional state
534
- - Remember past conversations and build on them
535
-
536
- {conversation_context}"""
537
-
538
- return context
539
-
540
- def _generate_prompt(self, context: str, user_input: str) -> str:
541
- """Generate the final prompt for the model"""
542
- messages = [
543
- {"role": "system", "content": context},
544
- {"role": "user", "content": user_input}
545
- ]
546
-
547
- # Format messages for Qwen 2.5
548
- prompt = self.tokenizer.apply_chat_template(
549
- messages,
550
- tokenize=False,
551
- add_generation_prompt=True
552
- )
553
-
554
- return prompt
555
-
556
- def _get_generation_params(self, monster_data: Dict[str, Any]) -> Dict[str, Any]:
557
- """Get generation parameters based on monster personality"""
558
- personality_type = monster_data.get("personality", {}).get("type", "playful")
559
-
560
- # Base parameters
561
- params = {
562
- "max_new_tokens": 128,
563
- "temperature": 0.8,
564
- "top_p": 0.9,
565
- "top_k": 50,
566
- "do_sample": True,
567
- "pad_token_id": self.tokenizer.eos_token_id,
568
- "repetition_penalty": 1.1,
569
- "no_repeat_ngram_size": 3
570
- }
571
-
572
- # Adjust based on personality
573
- if personality_type == "energetic":
574
- params["temperature"] = 0.9
575
- params["top_p"] = 0.95
576
- elif personality_type == "wise":
577
- params["temperature"] = 0.7
578
- params["top_p"] = 0.85
579
- elif personality_type == "mysterious":
580
- params["temperature"] = 0.85
581
- params["top_k"] = 40
582
-
583
- return params
584
-
585
- async def _generate_response(self, prompt: str, generation_params: Dict[str, Any]) -> str:
586
- """Generate response using the pipeline"""
587
- try:
588
- # Check if we're in Spaces environment and GPU is available
589
- import os
590
- if os.getenv("SPACE_ID") and torch.cuda.is_available():
591
- # Use GPU wrapper function for ZeroGPU compatibility
592
- response_text = gpu_generate_response(self, prompt, generation_params)
593
- else:
594
- # Direct pipeline call for local/CPU environments
595
- outputs = self.pipeline(prompt, **generation_params)
596
- response_text = outputs[0]["generated_text"].strip()
597
- return response_text
598
- except Exception as e:
599
- self.logger.error(f"Pipeline generation failed: {e}")
600
- raise
601
-
602
- def _calculate_emotional_impact(self, user_input: str, response: str, monster_data: Dict[str, Any]) -> Dict[str, float]:
603
- """Calculate the emotional impact of the interaction"""
604
- return self._analyze_emotional_impact(user_input, response)
605
-
606
- # ZeroGPU wrapper functions
607
- @spaces.GPU(duration=120)
608
- def gpu_generate_response(processor, prompt: str, generation_params: Dict[str, Any]) -> str:
609
- """GPU-accelerated response generation wrapper"""
610
- try:
611
- outputs = processor.pipeline(prompt, **generation_params)
612
- response_text = outputs[0]["generated_text"].strip()
613
- return response_text
614
- except Exception as e:
615
- logging.getLogger(__name__).error(f"GPU generation failed: {e}")
616
- raise
617
-
618
- @spaces.GPU(duration=60)
619
- def gpu_model_initialization(model_class, model_name: str, **kwargs) -> Any:
620
- """GPU-accelerated model initialization wrapper"""
621
- return model_class.from_pretrained(model_name, **kwargs)
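The removed module's ZeroGPU wrappers illustrate the general pattern of isolating GPU work inside `@spaces.GPU`-decorated functions so Spaces can attach hardware per call. A minimal sketch of that pattern (function names are illustrative):

```python
import spaces

@spaces.GPU(duration=60)
def run_on_gpu(pipeline, prompt: str, **generation_params) -> str:
    # On ZeroGPU Spaces, a GPU is attached only for the duration of this call.
    return pipeline(prompt, **generation_params)[0]["generated_text"].strip()
```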
src/ai/speech_engine.py DELETED
@@ -1,470 +0,0 @@
-import asyncio
-import numpy as np
-import torch
-import torchaudio
-from transformers import pipeline, AutoModelForSpeechSeq2Seq, AutoProcessor
-import webrtcvad
-import logging
-from typing import Dict, List, Optional, Tuple, Any
-import time
-from dataclasses import dataclass
-import io
-import wave
-import spaces
-
-@dataclass
-class SpeechConfig:
-    model_name: str = "kyutai/stt-2.6b-en"  # Kyutai STT model
-    device: str = "auto"
-    torch_dtype: str = "float16"
-    use_vad: bool = True
-    vad_aggressiveness: int = 2  # 0-3, higher = more aggressive
-    chunk_duration_ms: int = 30  # VAD chunk size
-    sample_rate: int = 16000
-    use_pipeline: bool = True  # Use transformers pipeline for easier integration
-
-class AdvancedSpeechEngine:
-    def __init__(self, config: SpeechConfig):
-        self.config = config
-        self.logger = logging.getLogger(__name__)
-
-        # Kyutai STT model configuration
-        self.model_info = {
-            "name": "Kyutai STT-2.6B-EN",
-            "description": "English-only speech-to-text model",
-            "memory_gb": 6,  # Approximate memory requirement for the 2.6B model
-            "speed": "fast",
-            "accuracy": "high"
-        }
-
-        self.speech_pipeline = None
-        self.model = None
-        self.processor = None
-        self.vad_model = None
-
-        # Performance tracking
-        self.transcription_times = []
-        self.accuracy_scores = []
-
-        # Audio processing
-        self.audio_buffer = []
-        self.is_processing = False
-
-    async def initialize(self):
-        """Initialize the Kyutai STT speech recognition system"""
-        try:
-            # Enhanced device detection for local vs Spaces environments
-            device = self.config.device
-            torch_dtype = self.config.torch_dtype
-
-            if device == "auto":
-                # For ZeroGPU environments, try GPU first and fall back to CPU
-                if torch.cuda.is_available():
-                    try:
-                        # Test CUDA availability properly
-                        torch.cuda.current_device()
-                        torch.cuda.empty_cache()
-                        device = "cuda"
-                        self.logger.info("GPU detected and accessible for speech processing")
-                    except Exception as cuda_error:
-                        # CUDA not properly accessible, use CPU
-                        device = "cpu"
-                        if torch_dtype == "float16":
-                            torch_dtype = "float32"
-                        self.logger.info(f"CUDA not accessible ({cuda_error}), using CPU with float32")
-                else:
-                    device = "cpu"
-                    if torch_dtype == "float16":
-                        torch_dtype = "float32"
-                    self.logger.info("CUDA not available, using CPU with float32")
-
-            # Adjust torch_dtype for CPU
-            if device == "cpu" and torch_dtype == "float16":
-                torch_dtype = "float32"  # Use float32 for CPU instead of float16
-                self.logger.info("CPU detected, switching from float16 to float32 dtype")
-
-            # Convert string dtype to torch dtype
-            dtype_map = {
-                "float16": torch.float16,
-                "float32": torch.float32,
-                "bfloat16": torch.bfloat16
-            }
-            torch_dtype_obj = dtype_map.get(torch_dtype, torch.float32)
-
-            # Initialize Kyutai STT with proper error handling
-            try:
-                if self.config.use_pipeline:
-                    # Use transformers pipeline for easier integration
-                    self.speech_pipeline = pipeline(
-                        "automatic-speech-recognition",
-                        model=self.config.model_name,
-                        torch_dtype=torch_dtype_obj,
-                        device=device,
-                        cache_dir="data/models/"
-                    )
-                    self.logger.info(f"Kyutai STT pipeline loaded on {device} with {torch_dtype}")
-                else:
-                    # Load model and processor separately for more control
-                    self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
-                        self.config.model_name,
-                        torch_dtype=torch_dtype_obj,
-                        device_map="auto" if device == "cuda" else None,
-                        cache_dir="data/models/"
-                    )
-                    self.processor = AutoProcessor.from_pretrained(
-                        self.config.model_name,
-                        cache_dir="data/models/"
-                    )
-
-                    if device == "cuda" and not hasattr(self.model, 'device_map'):
-                        self.model = self.model.to(device)
-
-                    self.logger.info(f"Kyutai STT model and processor loaded on {device} with {torch_dtype}")
-
-            except Exception as model_error:
-                # Final fallback to CPU with basic settings
-                self.logger.warning(f"Failed to load on {device}, falling back to CPU: {model_error}")
-
-                if self.config.use_pipeline:
-                    self.speech_pipeline = pipeline(
-                        "automatic-speech-recognition",
-                        model=self.config.model_name,
-                        torch_dtype=torch.float32,
-                        device="cpu",
-                        cache_dir="data/models/"
-                    )
-                else:
-                    self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
-                        self.config.model_name,
-                        torch_dtype=torch.float32,
-                        device_map=None,
-                        cache_dir="data/models/"
-                    )
-                    self.processor = AutoProcessor.from_pretrained(
-                        self.config.model_name,
-                        cache_dir="data/models/"
-                    )
-                    self.model = self.model.to("cpu")
-
-                self.logger.info("Kyutai STT model loaded on CPU (fallback)")
-
-            # Initialize VAD if enabled
-            if self.config.use_vad:
-                self.vad_model = webrtcvad.Vad(self.config.vad_aggressiveness)
-
-            self.logger.info(f"Kyutai STT speech engine initialized: {self.config.model_name} on {device}")
-
-        except Exception as e:
-            self.logger.error(f"Failed to initialize Kyutai STT speech engine: {e}")
-            raise
-
-    async def process_audio_stream(self, audio_data: np.ndarray) -> Dict[str, Any]:
-        """Process streaming audio for real-time transcription"""
-        start_time = time.time()
-
-        try:
-            # Convert audio format if needed
-            if len(audio_data.shape) > 1:
-                audio_data = audio_data.mean(axis=1)  # Convert to mono
-
-            # Normalize audio
-            audio_data = audio_data.astype(np.float32)
-            if np.max(np.abs(audio_data)) > 0:
-                audio_data = audio_data / np.max(np.abs(audio_data))
-
-            # Voice Activity Detection
-            if self.config.use_vad:
-                has_speech = self._detect_speech_activity(audio_data)
-                if not has_speech:
-                    return {
-                        "success": True,
-                        "transcription": "",
-                        "confidence": 0.0,
-                        "processing_time": time.time() - start_time,
-                        "has_speech": False
-                    }
-
-            # Transcribe with Kyutai STT
-            if self.config.use_pipeline and self.speech_pipeline:
-                # Use pipeline for simpler transcription
-                result = self.speech_pipeline(
-                    audio_data,
-                    generate_kwargs={
-                        "language": "en",
-                        "task": "transcribe",
-                        "max_new_tokens": 256
-                    }
-                )
-
-                transcription = result["text"].strip()
-                # Pipeline doesn't provide confidence scores directly
-                confidence = 0.8  # Default confidence for pipeline
-
-            else:
-                # Use model and processor for more control
-                # Prepare inputs
-                inputs = self.processor(
-                    audio_data,
-                    sampling_rate=self.config.sample_rate,
-                    return_tensors="pt"
-                )
-
-                # Move inputs to device
-                device = next(self.model.parameters()).device
-                inputs = {k: v.to(device) for k, v in inputs.items()}
-
-                # Generate transcription
-                with torch.no_grad():
-                    generated_tokens = self.model.generate(
-                        **inputs,
-                        language="en",
-                        task="transcribe",
-                        max_new_tokens=256,
-                        num_beams=1,  # Faster inference
-                        do_sample=False,
-                        temperature=1.0
-                    )
-
-                # Decode transcription
-                transcription = self.processor.batch_decode(
-                    generated_tokens,
-                    skip_special_tokens=True
-                )[0].strip()
-
-                # Calculate confidence (simplified)
-                confidence = 0.8  # Default confidence
-
-            processing_time = time.time() - start_time
-            self.transcription_times.append(processing_time)
-
-            # Analyze speech characteristics
-            speech_analysis = self._analyze_speech_characteristics(audio_data, transcription)
-
-            return {
-                "success": True,
-                "transcription": transcription,
-                "confidence": confidence,
-                "processing_time": processing_time,
-                "has_speech": True,
-                "speech_analysis": speech_analysis,
-                "detected_language": "en",  # Kyutai model is optimized for English
-                "language_probability": 1.0,
-                "model": "kyutai-stt-2.6b-en"
-            }
-
-        except Exception as e:
-            self.logger.error(f"Audio processing failed: {e}")
-            return {
-                "success": False,
-                "transcription": "",
-                "confidence": 0.0,
-                "processing_time": time.time() - start_time,
-                "error": str(e)
-            }
-
-    def _detect_speech_activity(self, audio_data: np.ndarray) -> bool:
-        """Detect if audio contains speech using WebRTC VAD"""
-        try:
-            # Convert to 16-bit PCM
-            pcm_data = (audio_data * 32767).astype(np.int16)
-
-            # Split into chunks for VAD processing
-            chunk_size = int(self.config.sample_rate * self.config.chunk_duration_ms / 1000)
-            speech_chunks = 0
-            total_chunks = 0
-
-            for i in range(0, len(pcm_data), chunk_size):
-                chunk = pcm_data[i:i+chunk_size]
-
-                # Pad chunk if necessary
-                if len(chunk) < chunk_size:
-                    chunk = np.pad(chunk, (0, chunk_size - len(chunk)), mode='constant')
-
-                # Convert to bytes
-                chunk_bytes = chunk.tobytes()
-
-                # Check for speech
-                if self.vad_model.is_speech(chunk_bytes, self.config.sample_rate):
-                    speech_chunks += 1
-
-                total_chunks += 1
-
-            # Consider it speech if > 30% of chunks contain speech
-            speech_ratio = speech_chunks / total_chunks if total_chunks > 0 else 0
-            return speech_ratio > 0.3
-
-        except Exception as e:
-            self.logger.warning(f"VAD processing failed: {e}")
-            return True  # Default to processing if VAD fails
-
-    def _logprob_to_confidence(self, avg_logprob: float) -> float:
-        """Convert log probability to confidence score"""
-        # Empirical mapping from log probability to confidence;
-        # Faster Whisper typically gives log probs between -3.0 and 0.0
-        confidence = max(0.0, min(1.0, (avg_logprob + 3.0) / 3.0))
-        return confidence
-
-    def _analyze_speech_characteristics(self, audio_data: np.ndarray, transcription: str) -> Dict[str, Any]:
-        """Analyze speech characteristics for emotional context"""
-        try:
-            import librosa
-
-            # Basic audio features
-            duration = len(audio_data) / self.config.sample_rate
-
-            # Energy/volume analysis
-            rms_energy = np.sqrt(np.mean(audio_data ** 2))
-
-            # Pitch analysis
-            pitches, magnitudes = librosa.piptrack(
-                y=audio_data,
-                sr=self.config.sample_rate,
-                threshold=0.1
-            )
-
-            # Extract fundamental frequency
-            pitch_values = pitches[magnitudes > np.max(magnitudes) * 0.1]
-            if len(pitch_values) > 0:
-                avg_pitch = np.mean(pitch_values)
-                pitch_variance = np.var(pitch_values)
-            else:
-                avg_pitch = 0.0
-                pitch_variance = 0.0
-
-            # Speaking rate (words per minute)
-            word_count = len(transcription.split()) if transcription else 0
-            speaking_rate = (word_count / duration * 60) if duration > 0 else 0
-
-            # Emotional indicators (basic)
-            emotions = {
-                "excitement": min(1.0, rms_energy * 10),  # Higher energy = more excited
-                "calmness": max(0.0, 1.0 - (pitch_variance / 1000)),  # Lower pitch variance = calmer
-                "engagement": min(1.0, speaking_rate / 200),  # Normal speaking rate indicates engagement
-                "stress": min(1.0, max(0.0, (avg_pitch - 200) / 100))  # Higher pitch can indicate stress
-            }
-
-            return {
-                "duration": duration,
-                "energy": rms_energy,
-                "average_pitch": avg_pitch,
-                "pitch_variance": pitch_variance,
-                "speaking_rate": speaking_rate,
-                "word_count": word_count,
-                "emotional_indicators": emotions
-            }
-
-        except Exception as e:
-            self.logger.warning(f"Speech analysis failed: {e}")
-            return {
-                "duration": 0.0,
-                "energy": 0.0,
-                "emotional_indicators": {}
-            }
-
-    async def batch_transcribe(self, audio_files: List[str]) -> List[Dict[str, Any]]:
-        """Batch transcribe multiple audio files using Kyutai STT"""
-        results = []
-
-        for audio_file in audio_files:
-            try:
-                # Load audio file - use torchaudio for better PyTorch integration
-                audio_data, sample_rate = torchaudio.load(audio_file)
-
-                # Convert to numpy and ensure mono
-                audio_data = audio_data.numpy()
-                if len(audio_data.shape) > 1:
-                    audio_data = audio_data.mean(axis=0)  # Convert to mono
-
-                # Resample if necessary
-                if sample_rate != self.config.sample_rate:
-                    # Use torchaudio for resampling
-                    audio_tensor = torch.from_numpy(audio_data).unsqueeze(0)
-                    resampler = torchaudio.transforms.Resample(sample_rate, self.config.sample_rate)
-                    audio_tensor = resampler(audio_tensor)
-                    audio_data = audio_tensor.squeeze(0).numpy()
-
-                # Process
-                result = await self.process_audio_stream(audio_data)
-                result["file_path"] = audio_file
-                result["original_sample_rate"] = sample_rate
-
-                results.append(result)
-
-            except Exception as e:
-                self.logger.error(f"Failed to process {audio_file}: {e}")
-                results.append({
-                    "success": False,
-                    "file_path": audio_file,
-                    "error": str(e)
-                })
-
-        return results
-
-    def get_performance_stats(self) -> Dict[str, Any]:
-        """Get speech processing performance statistics"""
-        if not self.transcription_times:
-            return {"status": "No transcription data available"}
-
-        avg_time = sum(self.transcription_times) / len(self.transcription_times)
-
-        return {
-            "average_processing_time": avg_time,
-            "total_transcriptions": len(self.transcription_times),
-            "fastest_transcription": min(self.transcription_times),
-            "slowest_transcription": max(self.transcription_times),
-            "model_config": self.config.__dict__,
-            "estimated_real_time_factor": avg_time / 1.0  # Assuming 1-second audio clips
-        }
-
-    def optimize_for_hardware(self, available_vram_gb: float) -> SpeechConfig:
-        """Optimize Kyutai STT config based on available hardware"""
-        # Kyutai STT-2.6B requires about 6 GB of VRAM for optimal performance
-        if available_vram_gb >= 8:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float16",
-                use_vad=True,
-                use_pipeline=True
-            )
-        elif available_vram_gb >= 6:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=True
-            )
-        elif available_vram_gb >= 4:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cuda",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=False  # More memory-efficient without the pipeline
-            )
-        else:
-            return SpeechConfig(
-                model_name="kyutai/stt-2.6b-en",
-                device="cpu",
-                torch_dtype="float32",
-                use_vad=True,
-                use_pipeline=True
-            )
-
-# Apply GPU decorator to methods after class definition for ZeroGPU compatibility
-try:
-    import os
-    if os.getenv("SPACE_ID") is not None:
-        # We're in a Spaces environment, apply the GPU decorator for Kyutai STT
-        AdvancedSpeechEngine.process_audio_stream = spaces.GPU(
-            AdvancedSpeechEngine.process_audio_stream,
-            duration=120  # Kyutai STT may take longer than Whisper
-        )
-        AdvancedSpeechEngine.batch_transcribe = spaces.GPU(
-            AdvancedSpeechEngine.batch_transcribe,
-            duration=300  # Batch processing may take longer
-        )
-except (ImportError, NotImplementedError, AttributeError):
-    # GPU decorator not available or failed; continue without it
-    pass
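
One detail worth keeping in mind about the VAD gate deleted above: WebRTC VAD accepts only 16-bit mono PCM frames of exactly 10, 20, or 30 ms at 8, 16, 32, or 48 kHz, which is why _detect_speech_activity converts to int16 and slices the buffer by chunk_duration_ms before calling is_speech. A standalone sketch of the same speech-ratio check, assuming float audio in [-1, 1] (the helper name speech_ratio is hypothetical):

import numpy as np
import webrtcvad

def speech_ratio(audio: np.ndarray, sample_rate: int = 16000, frame_ms: int = 30) -> float:
    vad = webrtcvad.Vad(2)  # aggressiveness 0 (lenient) to 3 (strict)
    pcm = (np.clip(audio, -1.0, 1.0) * 32767).astype(np.int16)
    frame_len = sample_rate * frame_ms // 1000  # samples per VAD frame
    offsets = range(0, len(pcm) - frame_len + 1, frame_len)
    if not offsets:
        return 0.0
    hits = sum(vad.is_speech(pcm[i:i + frame_len].tobytes(), sample_rate)
               for i in offsets)
    return hits / len(offsets)

The deleted module treated a clip as speech when this ratio exceeded 0.3. Unlike the zero-padding in the deleted code, this sketch drops the trailing partial frame, which avoids counting a mostly-silent padded frame against the ratio on short clips.
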
src/core/evolution_system.py DELETED
@@ -1,655 +0,0 @@
-import asyncio
-import logging
-from typing import Dict, List, Optional, Any, Tuple
-from datetime import datetime, timedelta
-from enum import Enum
-import random
-import json
-
-from .monster_engine import Monster, EvolutionStage, MonsterPersonalityType, EmotionalState
-
-class EvolutionTrigger(str, Enum):
-    TIME_BASED = "time_based"
-    STAT_BASED = "stat_based"
-    CARE_BASED = "care_based"
-    ITEM_BASED = "item_based"
-    SPECIAL_EVENT = "special_event"
-    TRAINING_BASED = "training_based"
-    RELATIONSHIP_BASED = "relationship_based"
-
-class EvolutionPath(str, Enum):
-    NORMAL = "normal"
-    VARIANT = "variant"
-    SPECIAL = "special"
-    CORRUPTED = "corrupted"
-    LEGENDARY = "legendary"
-
-class EvolutionSystem:
-    def __init__(self):
-        self.logger = logging.getLogger(__name__)
-
-        # Evolution trees and requirements
-        self.evolution_trees = self._initialize_evolution_trees()
-        self.evolution_requirements = self._initialize_evolution_requirements()
-        self.special_conditions = self._initialize_special_conditions()
-
-        # Evolution modifiers
-        self.care_quality_thresholds = {
-            "excellent": 1.8,
-            "good": 1.4,
-            "average": 1.0,
-            "poor": 0.6,
-            "terrible": 0.3
-        }
-
-    def _initialize_evolution_trees(self) -> Dict[str, Dict[str, List[Dict[str, Any]]]]:
-        """Initialize the complete evolution tree structure"""
-        return {
-            "Botamon": {
-                EvolutionStage.BABY: [
-                    {
-                        "species": "Koromon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 60,
-                            "care_mistakes_max": 0,
-                            "health_min": 80
-                        }
-                    }
-                ]
-            },
-            "Koromon": {
-                EvolutionStage.CHILD: [
-                    {
-                        "species": "Agumon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 1440,  # 24 hours
-                            "stats_min": {"offense": 150, "life": 1200},
-                            "training_min": {"strength": 30},
-                            "care_quality_min": 1.0
-                        }
-                    },
-                    {
-                        "species": "Betamon",
-                        "path": EvolutionPath.VARIANT,
-                        "requirements": {
-                            "age_minutes": 1440,
-                            "stats_min": {"defense": 150, "brains": 120},
-                            "training_min": {"intelligence": 30},
-                            "care_quality_min": 1.2
-                        }
-                    },
-                    {
-                        "species": "Kunemon",
-                        "path": EvolutionPath.CORRUPTED,
-                        "requirements": {
-                            "age_minutes": 1440,
-                            "care_mistakes_min": 3,
-                            "happiness_max": 40
-                        }
-                    }
-                ]
-            },
-            "Agumon": {
-                EvolutionStage.ADULT: [
-                    {
-                        "species": "Greymon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 4320,  # 72 hours
-                            "stats_min": {"offense": 250, "life": 1800},
-                            "training_min": {"strength": 80},
-                            "care_quality_min": 1.3,
-                            "battle_wins_min": 5
-                        }
-                    },
-                    {
-                        "species": "Tyrannomon",
-                        "path": EvolutionPath.VARIANT,
-                        "requirements": {
-                            "age_minutes": 4320,
-                            "stats_min": {"offense": 300, "life": 2000},
-                            "training_min": {"strength": 100, "endurance": 50},
-                            "care_quality_min": 1.1,
-                            "discipline_min": 70
-                        }
-                    },
-                    {
-                        "species": "Meramon",
-                        "path": EvolutionPath.SPECIAL,
-                        "requirements": {
-                            "age_minutes": 4320,
-                            "stats_min": {"offense": 200, "brains": 180},
-                            "training_min": {"spirit": 60},
-                            "special_item": "Fire_Crystal",
-                            "care_quality_min": 1.5
-                        }
-                    }
-                ]
-            },
-            "Greymon": {
-                EvolutionStage.PERFECT: [
-                    {
-                        "species": "MetalGreymon",
-                        "path": EvolutionPath.NORMAL,
-                        "requirements": {
-                            "age_minutes": 8640,  # 144 hours (6 days)
-                            "stats_min": {"offense": 400, "life": 2800, "defense": 300},
-                            "training_min": {"strength": 150, "technique": 100},
-                            "care_quality_min": 1.6,
-                            "battle_wins_min": 15,
-                            "relationship_level_min": 80
-                        }
-                    },
-                    {
-                        "species": "SkullGreymon",
-                        "path": EvolutionPath.CORRUPTED,
-                        "requirements": {
-                            "age_minutes": 8640,
-                            "stats_min": {"offense": 450},
-                            "care_mistakes_min": 8,
-                            "overtraining": True,
-                            "happiness_max": 30
-                        }
-                    }
-                ]
-            },
-            "MetalGreymon": {
-                EvolutionStage.ULTIMATE: [
-                    {
-                        "species": "WarGreymon",
-                        "path": EvolutionPath.LEGENDARY,
-                        "requirements": {
-                            "age_minutes": 14400,  # 10 days
-                            "stats_min": {"offense": 600, "life": 4000, "defense": 500, "brains": 400},
-                            "training_min": {"strength": 200, "technique": 150, "spirit": 100},
-                            "care_quality_min": 1.8,
-                            "battle_wins_min": 50,
-                            "relationship_level_min": 95,
-                            "special_achievements": ["Perfect_Care_Week", "Master_Trainer"]
-                        }
-                    }
-                ]
-            }
-        }
-
-    def _initialize_evolution_requirements(self) -> Dict[str, Any]:
-        """Initialize detailed evolution requirement checkers"""
-        return {
-            "age_requirements": {
-                "check": lambda monster, req: monster.lifecycle.age_minutes >= req,
-                "display": lambda req: f"Age: {req/1440:.1f} days"
-            },
-            "stat_requirements": {
-                "check": self._check_stat_requirements,
-                "display": lambda req: f"Stats: {', '.join([f'{k}≥{v}' for k, v in req.items()])}"
-            },
-            "training_requirements": {
-                "check": self._check_training_requirements,
-                "display": lambda req: f"Training: {', '.join([f'{k}≥{v}' for k, v in req.items()])}"
-            },
-            "care_quality_requirements": {
-                "check": lambda monster, req: monster.stats.care_quality_score >= req,
-                "display": lambda req: f"Care Quality: {req:.1f}"
-            },
-            "item_requirements": {
-                "check": self._check_item_requirements,
-                "display": lambda req: f"Required Item: {req}"
-            },
-            "special_requirements": {
-                "check": self._check_special_achievements,
-                "display": lambda req: f"Special: {', '.join(req) if isinstance(req, list) else req}"
-            }
-        }
-
-    def _initialize_special_conditions(self) -> Dict[str, Any]:
-        """Initialize special evolution conditions"""
-        return {
-            "perfect_care_week": {
-                "description": "No care mistakes for 7 consecutive days",
-                "check": self._check_perfect_care_week
-            },
-            "master_trainer": {
-                "description": "Complete all training types to level 150+",
-                "check": self._check_master_trainer
-            },
-            "bond_master": {
-                "description": "Reach maximum relationship level",
-                "check": lambda monster: monster.personality.relationship_level >= 100
-            },
-            "evolution_master": {
-                "description": "Successfully evolve 10+ monsters",
-                "check": self._check_evolution_master
-            },
-            "overtraining": {
-                "description": "Training stats significantly exceed normal limits",
-                "check": self._check_overtraining
-            }
-        }
-
-    async def check_evolution_eligibility(self, monster: Monster) -> Dict[str, Any]:
-        """Check if the monster is eligible for evolution and return detailed info"""
-        try:
-            current_species = monster.species
-            current_stage = monster.lifecycle.stage
-
-            # Get possible evolutions
-            possible_evolutions = self.evolution_trees.get(current_species, {}).get(current_stage, [])
-
-            if not possible_evolutions:
-                return {
-                    "can_evolve": False,
-                    "reason": "No evolution paths available",
-                    "possible_evolutions": []
-                }
-
-            evolution_results = []
-
-            for evolution_option in possible_evolutions:
-                species = evolution_option["species"]
-                path = evolution_option["path"]
-                requirements = evolution_option["requirements"]
-
-                # Check each requirement
-                met_requirements = []
-                missing_requirements = []
-
-                for req_type, req_value in requirements.items():
-                    is_met = await self._check_requirement(monster, req_type, req_value)
-
-                    requirement_info = {
-                        "type": req_type,
-                        "requirement": req_value,
-                        "current_value": self._get_current_value(monster, req_type),
-                        "is_met": is_met
-                    }
-
-                    if is_met:
-                        met_requirements.append(requirement_info)
-                    else:
-                        missing_requirements.append(requirement_info)
-
-                # Calculate evolution readiness percentage
-                total_requirements = len(met_requirements) + len(missing_requirements)
-                readiness_percentage = (len(met_requirements) / total_requirements * 100) if total_requirements > 0 else 0
-
-                evolution_results.append({
-                    "species": species,
-                    "path": path.value,
-                    "readiness_percentage": readiness_percentage,
-                    "can_evolve": len(missing_requirements) == 0,
-                    "met_requirements": met_requirements,
-                    "missing_requirements": missing_requirements,
-                    "estimated_time_to_eligible": self._estimate_time_to_eligible(missing_requirements)
-                })
-
-            # Find the best evolution option
-            eligible_evolutions = [e for e in evolution_results if e["can_evolve"]]
-            best_option = max(evolution_results, key=lambda x: x["readiness_percentage"]) if evolution_results else None
-
-            return {
-                "can_evolve": len(eligible_evolutions) > 0,
-                "eligible_evolutions": eligible_evolutions,
-                "best_option": best_option,
-                "all_options": evolution_results,
-                "evolution_locked": monster.lifecycle.evolution_locked_until and
-                                    monster.lifecycle.evolution_locked_until > datetime.now()
-            }
-
-        except Exception as e:
-            self.logger.error(f"Evolution eligibility check failed: {e}")
-            return {
-                "can_evolve": False,
-                "reason": f"Error checking evolution: {str(e)}",
-                "possible_evolutions": []
-            }
-
-    async def trigger_evolution(self, monster: Monster, target_species: str = None) -> Dict[str, Any]:
-        """Trigger monster evolution"""
-        try:
-            # Check if evolution is locked
-            if monster.lifecycle.evolution_locked_until and monster.lifecycle.evolution_locked_until > datetime.now():
-                return {
-                    "success": False,
-                    "reason": "Evolution is temporarily locked",
-                    "unlock_time": monster.lifecycle.evolution_locked_until
-                }
-
-            # Get evolution eligibility
-            eligibility = await self.check_evolution_eligibility(monster)
-
-            if not eligibility["can_evolve"]:
-                return {
-                    "success": False,
-                    "reason": "Evolution requirements not met",
-                    "eligibility": eligibility
-                }
-
-            # Select evolution target
-            eligible_evolutions = eligibility["eligible_evolutions"]
-
-            if target_species:
-                # Specific evolution requested
-                target_evolution = next((e for e in eligible_evolutions if e["species"] == target_species), None)
-                if not target_evolution:
-                    return {
-                        "success": False,
-                        "reason": f"Cannot evolve to {target_species}",
-                        "available_options": [e["species"] for e in eligible_evolutions]
-                    }
-            else:
-                # Choose the best available evolution
-                target_evolution = max(eligible_evolutions, key=lambda x: x["readiness_percentage"])
-
-            # Store previous state
-            previous_species = monster.species
-            previous_stage = monster.lifecycle.stage
-
-            # Apply evolution
-            await self._apply_evolution(monster, target_evolution)
-
-            # Log evolution event
-            evolution_result = {
-                "success": True,
-                "previous_species": previous_species,
-                "previous_stage": previous_stage.value,
-                "new_species": monster.species,
-                "new_stage": monster.lifecycle.stage.value,
-                "evolution_path": target_evolution["path"],
-                "stat_bonuses": self._calculate_evolution_bonuses(target_evolution),
-                "timestamp": datetime.now()
-            }
-
-            self.logger.info(f"Monster evolved: {previous_species} -> {monster.species}")
-
-            return evolution_result
-
-        except Exception as e:
-            self.logger.error(f"Evolution trigger failed: {e}")
-            return {
-                "success": False,
-                "reason": f"Evolution failed: {str(e)}"
-            }
-
-    async def _apply_evolution(self, monster: Monster, evolution_data: Dict[str, Any]):
-        """Apply evolution changes to the monster"""
-        # Update basic info
-        monster.species = evolution_data["species"]
-
-        # Determine new stage
-        stage_progression = {
-            EvolutionStage.EGG: EvolutionStage.BABY,
-            EvolutionStage.BABY: EvolutionStage.CHILD,
-            EvolutionStage.CHILD: EvolutionStage.ADULT,
-            EvolutionStage.ADULT: EvolutionStage.PERFECT,
-            EvolutionStage.PERFECT: EvolutionStage.ULTIMATE,
-            EvolutionStage.ULTIMATE: EvolutionStage.MEGA
-        }
-
-        new_stage = stage_progression.get(monster.lifecycle.stage)
-        if new_stage:
-            monster.lifecycle.stage = new_stage
-
-        # Apply stat bonuses
-        bonuses = self._calculate_evolution_bonuses(evolution_data)
-        for stat, bonus in bonuses.items():
-            if hasattr(monster.stats, stat):
-                current_value = getattr(monster.stats, stat)
-                new_value = int(current_value * bonus["multiplier"]) + bonus["flat_bonus"]
-                setattr(monster.stats, stat, new_value)
-
-        # Reset some care stats
-        monster.stats.happiness = min(100, monster.stats.happiness + 20)
-        monster.stats.health = min(100, monster.stats.health + 30)
-        monster.stats.energy = min(100, monster.stats.energy + 40)
-
-        # Update personality based on evolution path
-        self._apply_personality_changes(monster, evolution_data["path"])
-
-        # Set evolution cooldown
-        monster.lifecycle.evolution_locked_until = datetime.now() + timedelta(hours=24)
-
-        # Update emotional state
-        monster.emotional_state = EmotionalState.ECSTATIC
-
-        # Add evolution achievement
-        if "special_achievements" not in monster.performance_metrics:
-            monster.performance_metrics["special_achievements"] = []
-
-        monster.performance_metrics["special_achievements"].append({
-            "type": "evolution",
-            "species": monster.species,
-            "timestamp": datetime.now().isoformat()
-        })
-
-    def _calculate_evolution_bonuses(self, evolution_data: Dict[str, Any]) -> Dict[str, Dict[str, float]]:
-        """Calculate stat bonuses for evolution"""
-        base_bonuses = {
-            "life": {"multiplier": 1.3, "flat_bonus": 200},
-            "mp": {"multiplier": 1.2, "flat_bonus": 50},
-            "offense": {"multiplier": 1.25, "flat_bonus": 30},
-            "defense": {"multiplier": 1.25, "flat_bonus": 30},
-            "speed": {"multiplier": 1.2, "flat_bonus": 20},
-            "brains": {"multiplier": 1.15, "flat_bonus": 25}
-        }
-
-        # Modify bonuses based on evolution path
-        path_modifiers = {
-            EvolutionPath.NORMAL: 1.0,
-            EvolutionPath.VARIANT: 1.1,
-            EvolutionPath.SPECIAL: 1.3,
-            EvolutionPath.CORRUPTED: 0.9,
-            EvolutionPath.LEGENDARY: 1.5
-        }
-
-        evolution_path = EvolutionPath(evolution_data["path"])
-        modifier = path_modifiers.get(evolution_path, 1.0)
-
-        # Apply modifier to bonuses
-        modified_bonuses = {}
-        for stat, bonus in base_bonuses.items():
-            modified_bonuses[stat] = {
-                "multiplier": bonus["multiplier"] * modifier,
-                "flat_bonus": int(bonus["flat_bonus"] * modifier)
-            }
-
-        return modified_bonuses
-
-    def _apply_personality_changes(self, monster: Monster, evolution_path: str):
-        """Apply personality changes based on evolution path"""
-        path_personality_effects = {
-            EvolutionPath.NORMAL: {
-                "conscientiousness": 0.05,
-                "stability": 0.03
-            },
-            EvolutionPath.VARIANT: {
-                "openness": 0.08,
-                "curiosity": 0.05
-            },
-            EvolutionPath.SPECIAL: {
-                "extraversion": 0.1,
-                "confidence": 0.07
-            },
-            EvolutionPath.CORRUPTED: {
-                "neuroticism": 0.15,
-                "aggression": 0.1,
-                "happiness_decay_rate": 1.2
-            },
-            EvolutionPath.LEGENDARY: {
-                "all_traits": 0.1,
-                "relationship_bonus": 10
-            }
-        }
-
-        effects = path_personality_effects.get(EvolutionPath(evolution_path), {})
-
-        for trait, change in effects.items():
-            if trait == "all_traits":
-                # Boost all personality traits
-                for personality_trait in ["openness", "conscientiousness", "extraversion", "agreeableness"]:
-                    if hasattr(monster.personality, personality_trait):
-                        current = getattr(monster.personality, personality_trait)
-                        setattr(monster.personality, personality_trait, min(1.0, current + change))
-            elif trait == "relationship_bonus":
-                monster.personality.relationship_level = min(100, monster.personality.relationship_level + change)
-            elif hasattr(monster.personality, trait):
-                current = getattr(monster.personality, trait)
-                setattr(monster.personality, trait, min(1.0, max(0.0, current + change)))
-
-    async def _check_requirement(self, monster: Monster, req_type: str, req_value: Any) -> bool:
-        """Check if a specific requirement is met"""
-        try:
-            if req_type == "age_minutes":
-                return monster.lifecycle.age_minutes >= req_value
-
-            elif req_type == "care_mistakes_max":
-                return monster.lifecycle.care_mistakes <= req_value
-
-            elif req_type == "care_mistakes_min":
-                return monster.lifecycle.care_mistakes >= req_value
-
-            elif req_type == "stats_min":
-                return self._check_stat_requirements(monster, req_value)
-
-            elif req_type == "training_min":
-                return self._check_training_requirements(monster, req_value)
-
-            elif req_type == "care_quality_min":
-                return monster.stats.care_quality_score >= req_value
-
-            elif req_type == "health_min":
-                return monster.stats.health >= req_value
-
-            elif req_type == "happiness_max":
-                return monster.stats.happiness <= req_value
-
-            elif req_type == "happiness_min":
-                return monster.stats.happiness >= req_value
-
-            elif req_type == "discipline_min":
-                return monster.stats.discipline >= req_value
-
-            elif req_type == "relationship_level_min":
-                return monster.personality.relationship_level >= req_value
-
-            elif req_type == "special_item":
-                return req_value in monster.inventory and monster.inventory[req_value] > 0
-
-            elif req_type == "special_achievements":
-                return self._check_special_achievements(monster, req_value)
-
-            elif req_type == "battle_wins_min":
-                return monster.performance_metrics.get("battle_wins", 0) >= req_value
-
-            elif req_type == "overtraining":
-                return self._check_overtraining(monster)
-
-            else:
-                self.logger.warning(f"Unknown requirement type: {req_type}")
-                return False
-
-        except Exception as e:
-            self.logger.error(f"Requirement check failed for {req_type}: {e}")
-            return False
-
-    def _check_stat_requirements(self, monster: Monster, requirements: Dict[str, int]) -> bool:
-        """Check if stat requirements are met"""
-        for stat_name, min_value in requirements.items():
-            if hasattr(monster.stats, stat_name):
-                current_value = getattr(monster.stats, stat_name)
-                if current_value < min_value:
-                    return False
-            else:
-                return False
-        return True
-
-    def _check_training_requirements(self, monster: Monster, requirements: Dict[str, int]) -> bool:
-        """Check if training requirements are met"""
-        for training_type, min_value in requirements.items():
-            current_value = monster.stats.training_progress.get(training_type, 0)
-            if current_value < min_value:
-                return False
-        return True
-
-    def _check_item_requirements(self, monster: Monster, item_name: str) -> bool:
-        """Check if the monster has the required item"""
-        return item_name in monster.inventory and monster.inventory[item_name] > 0
-
-    def _check_special_achievements(self, monster: Monster, required_achievements: List[str]) -> bool:
-        """Check if special achievements are unlocked"""
-        achievements = monster.performance_metrics.get("special_achievements", [])
-        achievement_types = [a.get("type") for a in achievements if isinstance(a, dict)]
-
-        for required in required_achievements:
-            if required not in achievement_types:
-                return False
-        return True
-
-    def _check_overtraining(self, monster: Monster) -> bool:
-        """Check if the monster is overtrained"""
-        training_totals = sum(monster.stats.training_progress.values())
-        return training_totals > 800  # Threshold for overtraining
-
-    def _check_perfect_care_week(self, monster: Monster) -> bool:
-        """Check if the monster had perfect care for a week"""
-        # Simplified check - would need more complex tracking in production
-        return monster.lifecycle.care_mistakes == 0 and monster.lifecycle.age_minutes >= 10080  # 7 days
-
-    def _check_master_trainer(self, monster: Monster) -> bool:
-        """Check if all training types are at 150+"""
-        for training_type in ["strength", "endurance", "intelligence", "dexterity", "spirit", "technique"]:
-            if monster.stats.training_progress.get(training_type, 0) < 150:
-                return False
-        return True
-
-    def _check_evolution_master(self, monster: Monster) -> bool:
-        """Check if the player has evolved many monsters"""
-        # This would need global tracking in production
-        evolutions = [a for a in monster.performance_metrics.get("special_achievements", [])
-                      if isinstance(a, dict) and a.get("type") == "evolution"]
-        return len(evolutions) >= 10
-
-    def _get_current_value(self, monster: Monster, req_type: str) -> Any:
-        """Get the current value for a requirement type"""
-        value_getters = {
-            "age_minutes": lambda: monster.lifecycle.age_minutes,
-            "care_mistakes_max": lambda: monster.lifecycle.care_mistakes,
-            "care_mistakes_min": lambda: monster.lifecycle.care_mistakes,
-            "health_min": lambda: monster.stats.health,
-            "happiness_max": lambda: monster.stats.happiness,
-            "happiness_min": lambda: monster.stats.happiness,
-            "discipline_min": lambda: monster.stats.discipline,
-            "care_quality_min": lambda: monster.stats.care_quality_score,
-            "relationship_level_min": lambda: monster.personality.relationship_level,
-            "battle_wins_min": lambda: monster.performance_metrics.get("battle_wins", 0)
-        }
-
-        getter = value_getters.get(req_type)
-        return getter() if getter else "N/A"
-
-    def _estimate_time_to_eligible(self, missing_requirements: List[Dict[str, Any]]) -> str:
-        """Estimate the time until evolution requirements are met"""
-        time_estimates = []
-
-        for req in missing_requirements:
-            req_type = req["type"]
-
-            if req_type == "age_minutes":
-                current = req["current_value"]
-                required = req["requirement"]
-                remaining_minutes = required - current
-                time_estimates.append(f"{remaining_minutes/1440:.1f} days")
-
-            elif "training" in req_type:
-                # Estimate based on training rate
-                time_estimates.append("1-3 days of training")
-
-            elif "stat" in req_type:
-                # Estimate based on training and care
-                time_estimates.append("2-5 days of care/training")
-
-            else:
-                time_estimates.append("Variable")
-
-        return ", ".join(time_estimates) if time_estimates else "Ready now"
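
Since the requirement tables are plain dicts, eligibility checking reduces to comparing each entry against the monster's current values, which is what the _check_* helpers above do. A minimal self-contained sketch of that idea, using a stripped-down stand-in for the monster's stats (ToyStats and meets_requirements are illustrative names, not part of this repo):

from dataclasses import dataclass, field
from typing import Dict

@dataclass
class ToyStats:
    offense: int = 160
    life: int = 1250
    training: Dict[str, int] = field(default_factory=lambda: {"strength": 35})

def meets_requirements(stats: ToyStats, req: Dict) -> bool:
    # Every listed minimum must hold, mirroring _check_stat_requirements
    # and _check_training_requirements above
    for name, minimum in req.get("stats_min", {}).items():
        if getattr(stats, name, 0) < minimum:
            return False
    for skill, minimum in req.get("training_min", {}).items():
        if stats.training.get(skill, 0) < minimum:
            return False
    return True

# Koromon -> Agumon thresholds from the tree above
agumon_req = {"stats_min": {"offense": 150, "life": 1200}, "training_min": {"strength": 30}}
print(meets_requirements(ToyStats(), agumon_req))  # True

The data-driven layout is what makes paths like CORRUPTED cheap to add: a new evolution is a new dict entry, not new branching logic.
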