BladeSzaSza committed
Commit cc6b5d3 · 1 Parent(s): f54c072

fixed temp files

Files changed (2):
  1. .claude/settings.local.json +2 -1
  2. app.py +28 -8
.claude/settings.local.json CHANGED
@@ -5,7 +5,8 @@
       "Bash(ls:*)",
       "Bash(tree:*)",
       "Bash(find:*)",
-      "Bash(mkdir:*)"
+      "Bash(mkdir:*)",
+      "Bash(grep:*)"
     ],
     "deny": []
   }
app.py CHANGED
@@ -15,6 +15,13 @@ DATA_DIR.mkdir(exist_ok=True)
 (DATA_DIR / "models").mkdir(exist_ok=True)
 (DATA_DIR / "cache").mkdir(exist_ok=True)
 
+# Ensure Gradio cache directory exists
+import tempfile
+gradio_cache_dir = Path("/tmp/gradio")
+gradio_cache_dir.mkdir(parents=True, exist_ok=True)
+# Set environment variable for Gradio cache
+os.environ.setdefault("GRADIO_TEMP_DIR", str(gradio_cache_dir))
+
 # Import modules (to be created)
 from core.ai_pipeline import MonsterGenerationPipeline
 from core.game_mechanics import GameMechanics
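
This hunk is the core of the "fixed temp files" change: it creates a writable cache directory up front and points Gradio at it via the GRADIO_TEMP_DIR environment variable. A minimal standalone sketch of the same setup (it omits the tempfile import, which the hunk adds but never uses):

import os
from pathlib import Path

# Create a known-writable temp/cache directory for Gradio so uploads
# and generated assets land somewhere the process can write to.
gradio_cache_dir = Path("/tmp/gradio")
gradio_cache_dir.mkdir(parents=True, exist_ok=True)

# setdefault() respects a GRADIO_TEMP_DIR already provided by the host
# (e.g. a Hugging Face Space) rather than overwriting it.
os.environ.setdefault("GRADIO_TEMP_DIR", str(gradio_cache_dir))

# Importing Gradio only after the variable is set keeps the value
# visible regardless of when Gradio reads the environment.
import gradio as gr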
@@ -30,12 +37,19 @@ def initialize_systems():
     pipeline = MonsterGenerationPipeline()
     return pipeline
 
-# Initialize core systems
-try:
-    pipeline = initialize_systems()
-except Exception as e:
-    print(f"GPU initialization failed, falling back to CPU: {e}")
-    pipeline = MonsterGenerationPipeline(device="cpu")
+# Initialize core systems (defer GPU initialization)
+pipeline = None
+
+def get_pipeline():
+    """Get or initialize the pipeline with GPU support"""
+    global pipeline
+    if pipeline is None:
+        try:
+            pipeline = initialize_systems()
+        except Exception as e:
+            print(f"GPU initialization failed, falling back to CPU: {e}")
+            pipeline = MonsterGenerationPipeline(device="cpu")
+    return pipeline
 
 game_mechanics = GameMechanics()
 state_manager = StateManager(DATA_DIR)
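
The rewrite above turns eager, import-time construction into a lazy singleton: the first caller pays the initialization cost and triggers the CPU fallback if GPU setup raises, and every later call reuses the same object. A self-contained sketch of the pattern, with DummyPipeline standing in for MonsterGenerationPipeline:

class DummyPipeline:
    """Stand-in for MonsterGenerationPipeline in this sketch."""
    def __init__(self, device="cuda"):
        self.device = device

_pipeline = None

def get_pipeline():
    """Create the pipeline on first use; fall back to CPU on failure."""
    global _pipeline
    if _pipeline is None:
        try:
            _pipeline = DummyPipeline()  # may raise if GPU init fails
        except Exception as e:
            print(f"GPU initialization failed, falling back to CPU: {e}")
            _pipeline = DummyPipeline(device="cpu")
    return _pipeline

assert get_pipeline() is get_pipeline()  # one instance, reused on every call

Deferring construction this way also keeps module import cheap, which can matter when the GPU is only attached later (ZeroGPU-style Spaces, for instance).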
@@ -60,7 +74,8 @@ def generate_monster(oauth_profile: gr.OAuthProfile | None, audio_input=None, te
 
     try:
         # Generate monster using AI pipeline
-        result = pipeline.generate_monster(
+        current_pipeline = get_pipeline()
+        result = current_pipeline.generate_monster(
             audio_input=audio_input,
             text_input=text_input,
             reference_images=reference_images,
@@ -95,7 +110,8 @@ def generate_monster(oauth_profile: gr.OAuthProfile | None, audio_input=None, te
     except Exception as e:
         print(f"Error generating monster: {str(e)}")
         # Use fallback generation
-        fallback_result = pipeline.fallback_generation(text_input or "friendly digital creature")
+        current_pipeline = get_pipeline()
+        fallback_result = current_pipeline.fallback_generation(text_input or "friendly digital creature")
         fallback_dict = {
             "message": "⚡ Created using quick generation mode",
             "image": fallback_result.get('image'),
@@ -418,6 +434,10 @@ with gr.Blocks(
 
 # Launch the app
 if __name__ == "__main__":
+    # Suppress MCP warnings if needed
+    import warnings
+    warnings.filterwarnings("ignore", category=UserWarning, module="gradio.mcp")
+
     demo.queue(
         default_concurrency_limit=10,
         max_size=100
 
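
A note on the new warnings filter in the last hunk: the module argument to warnings.filterwarnings is a regular expression matched against the name of the module that issues the warning, so the filter silences UserWarnings coming from gradio.mcp without hiding anything else. A small self-contained demonstration (the warning text and filename are invented for the example):

import warnings

warnings.filterwarnings("ignore", category=UserWarning, module="gradio.mcp")

# Simulate a warning issued from inside gradio.mcp: filtered out.
warnings.warn_explicit("mcp warning", UserWarning,
                       filename="mcp.py", lineno=1, module="gradio.mcp")

# An ordinary warning from this module is still shown.
warnings.warn("unrelated warning", UserWarning)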