BladeSzaSza committed
Commit efef91a · verified · 1 Parent(s): d49dc0d

Upload folder using huggingface_hub

.gitignore CHANGED
@@ -327,4 +327,7 @@ tb_logs/
327
  outputs/
328
  .hydra/
329
 
330
- .working/
 
 
 
 
327
  outputs/
328
  .hydra/
329
 
330
+ .working/
331
+
332
+ # Claude Code project instructions
333
+ CLAUDE.md
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  title: Laban Movement Analysis
3
- emoji: 🏃
4
  colorFrom: purple
5
  colorTo: green
6
  app_file: app.py
@@ -20,14 +20,43 @@ tags:
20
  - mediapipe
21
  - yolo
22
  - gradio
23
- short_description: Professional movement analysis with pose estimation and AI
 
 
 
24
  license: apache-2.0
25
  ---
26
 
27
- # `gradio_labanmovementanalysis`
28
  <a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
29
 
30
- A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents
31
 
32
  ## Installation
33
 
@@ -46,8 +75,8 @@ Author: Csaba (BladeSzaSza)
46
 
47
  import gradio as gr
48
  import os
49
- from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
@@ -59,15 +88,10 @@ try:
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
- HAS_AGENT_API = True
63
-
64
- try:
65
- agent_api = LabanAgentAPI()
66
- except Exception as e:
67
- print(f"Warning: Agent API not available: {e}")
68
- agent_api = None
69
- except ImportError:
70
- HAS_AGENT_API = False
71
  # Initialize components
72
  try:
73
  analyzer = LabanMovementAnalysis(
@@ -99,21 +123,38 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
99
  error_result = {"error": str(e)}
100
  return error_result, None
101
 
102
- def process_video_standard(video, model, enable_viz, include_keypoints):
103
- """Standard video processing function."""
104
  if video is None:
105
- return None, None
106
-
107
  try:
108
- json_output, video_output = analyzer.process_video(
109
  video,
110
  model=model,
111
- enable_visualization=enable_viz,
112
  include_keypoints=include_keypoints
113
  )
114
- return json_output, video_output
115
- except Exception as e:
116
- return {"error": str(e)}, None
 
 
117
 
118
  # ── 4. Build UI ─────────────────────────────────────────────
119
  def create_demo() -> gr.Blocks:
@@ -122,7 +163,7 @@ def create_demo() -> gr.Blocks:
122
  theme='gstaff/sketch',
123
  fill_width=True,
124
  ) as demo:
125
-
126
  # ── Hero banner ──
127
  gr.Markdown(
128
  """
@@ -214,10 +255,15 @@ def create_demo() -> gr.Blocks:
214
  """
215
  )
216
  return demo
217
-
 
 
 
 
218
  if __name__ == "__main__":
219
  demo = create_demo()
220
  demo.launch(server_name="0.0.0.0",
 
221
  server_port=int(os.getenv("PORT", 7860)),
222
  mcp_server=True)
223
 
 
1
  ---
2
  title: Laban Movement Analysis
3
+ emoji: 🩰
4
  colorFrom: purple
5
  colorTo: green
6
  app_file: app.py
 
20
  - mediapipe
21
  - yolo
22
  - gradio
23
+ - agentic-analysis
24
+ - overlay-video
25
+ - temporal-patterns
26
+ short_description: Laban Movement Analysis (LMA) from pose estimation
27
  license: apache-2.0
28
  ---
29
 
30
+ # 🩰 Laban Movement Analysis
31
  <a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
32
 
33
+ **Advanced video movement analysis platform** combining Laban Movement Analysis (LMA) principles with modern AI pose estimation, intelligent analysis, and interactive visualization.
34
+
35
+ ## 🌟 Key Features
36
+
37
+ ### 📊 **Multi-Model Pose Estimation**
38
+ - **15 different pose estimation models** from multiple sources:
39
+ - **MediaPipe**: `mediapipe-lite`, `mediapipe-full`, `mediapipe-heavy`
40
+ - **MoveNet**: `movenet-lightning`, `movenet-thunder`
41
+ - **YOLO v8**: `yolo-v8-n/s/m/l/x` (5 variants)
42
+ - **YOLO v11**: `yolo-v11-n/s/m/l/x` (5 variants)
43
+
44
+ ### 🎥 **Comprehensive Video Processing**
45
+ - **JSON Analysis Output**: Detailed movement metrics with temporal data
46
+ - **Annotated Video Generation**: Pose overlay with Laban movement data
47
+ - **URL Support**: Direct processing from YouTube, Vimeo, and video URLs
48
+ - **Custom Overlay Component**: `gradio_overlay_video` for controlled layered visualization
49
+
50
+ ### 🤖 **Agentic Intelligence**
51
+ - **SUMMARY Analysis**: Narrative movement interpretation with temporal patterns
52
+ - **STRUCTURED Analysis**: Quantitative breakdowns and statistical insights
53
+ - **MOVEMENT FILTERS**: Pattern detection with intelligent filtering
54
+ - **Laban Interpretation**: Professional movement quality assessment
55
+
56
+ ### 🎨 **Interactive Visualization**
57
+ - **Standard Analysis Tab**: Core pose estimation and LMA processing
58
+ - **Overlay Visualization Tab**: Interactive layered video display
59
+ - **Agentic Analysis Tab**: AI-powered movement insights and filtering
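
The tabs above sit on top of the same Python class the component ships. A minimal, hedged sketch of driving it directly, assuming the `gradio_labanmovementanalysis` package is installed and using a placeholder video path (model names follow the list above):

```python
# Hedged sketch: drive the analyzer without the UI.
# "dance.mp4" is a placeholder; a YouTube/Vimeo URL also works per the URL support above.
from gradio_labanmovementanalysis import LabanMovementAnalysis

analyzer = LabanMovementAnalysis()

# process_video() is the call used elsewhere in this Space; it returns the
# analysis JSON plus an optional annotated overlay video when visualization is on.
json_output, video_output = analyzer.process_video(
    "dance.mp4",
    model="mediapipe-full",      # e.g. "movenet-thunder", "yolo-v11-n", ...
    enable_visualization=True,   # also write the pose/LMA overlay video
    include_keypoints=False,     # set True to keep raw keypoints in the JSON
)

# The agentic analysis layer consumes this JSON, reading per-frame metrics from
# json_output["movement_analysis"]["frames"][i]["metrics"].
```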
60
 
61
  ## Installation
62
 
 
75
 
76
  import gradio as gr
77
  import os
78
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
79
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
80
 
81
  # Import agent API if available
82
  # Initialize agent API if available
 
88
  MovementDirection,
89
  MovementIntensity
90
  )
91
+ agent_api = LabanAgentAPI()
92
+ except Exception as e:
93
+ print(f"Warning: Agent API not available: {e}")
94
+ agent_api = None
 
 
 
 
 
95
  # Initialize components
96
  try:
97
  analyzer = LabanMovementAnalysis(
 
123
  error_result = {"error": str(e)}
124
  return error_result, None
125
 
126
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
127
+ """
128
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
129
+
130
+ Args:
131
+ video (str): Path to the video file to be analyzed.
132
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
133
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
134
+
135
+ Returns:
136
+ dict:
137
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
138
+
139
+
140
+ Notes:
141
+ - Visualization is disabled in this standard processing function.
142
+ - If the input video is None, None is returned.
143
+ - If an error occurs during processing, a dictionary with an "error" key is returned.
144
+ """
145
  if video is None:
146
+ return None
 
147
  try:
148
+ json_output = analyzer.process(
149
  video,
150
  model=model,
 
151
  include_keypoints=include_keypoints
152
  )
153
+
154
+
155
+ return json_output
156
+ except (RuntimeError, ValueError, OSError) as e:
157
+ return {"error": str(e)}
158
 
159
  # ── 4. Build UI ─────────────────────────────────────────────
160
  def create_demo() -> gr.Blocks:
 
163
  theme='gstaff/sketch',
164
  fill_width=True,
165
  ) as demo:
166
+ # gr.api(process_video_standard, api_name="process_video") # <-- Remove from here
167
  # ── Hero banner ──
168
  gr.Markdown(
169
  """
 
255
  """
256
  )
257
  return demo
258
+
259
+ # Register API endpoint OUTSIDE the UI
260
+
261
+ gr.api(process_video_standard, api_name="process_video")
262
+
263
  if __name__ == "__main__":
264
  demo = create_demo()
265
  demo.launch(server_name="0.0.0.0",
266
+ share=True,
267
  server_port=int(os.getenv("PORT", 7860)),
268
  mcp_server=True)
269
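
With `process_video` registered via `gr.api(...)` above, the running Space exposes that endpoint to HTTP clients as well. A hedged sketch using `gradio_client` (the URL and file path are placeholders, not values from this repo):

```python
# Hedged sketch: call the registered "process_video" endpoint from a client.
from gradio_client import Client

client = Client("http://localhost:7860")   # or the deployed Space URL
result = client.predict(
    "examples/balette.mp4",                # video: local path or URL (placeholder)
    "mediapipe-full",                      # model
    False,                                 # include_keypoints
    api_name="/process_video",
)
print(result)
```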
 
app.py CHANGED
@@ -3,12 +3,12 @@
3
  Laban Movement Analysis – modernised Gradio Space
4
  Author: Csaba (BladeSzaSza)
5
  """
6
-
7
  import gradio as gr
8
  import os
9
- from spaces import GPU
10
- from src.backend.gradio_labanmovementanalysis import LabanMovementAnalysis
11
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
 
12
 
13
  # Import agent API if available
14
  # Initialize agent API if available
@@ -20,14 +20,11 @@ try:
20
  MovementDirection,
21
  MovementIntensity
22
  )
 
23
  HAS_AGENT_API = True
24
-
25
- try:
26
- agent_api = LabanAgentAPI()
27
- except Exception as e:
28
- print(f"Warning: Agent API not available: {e}")
29
- agent_api = None
30
- except ImportError:
31
  HAS_AGENT_API = False
32
  # Initialize components
33
  try:
@@ -39,7 +36,7 @@ except Exception as e:
39
  print(f"Warning: Some features may not be available: {e}")
40
  analyzer = LabanMovementAnalysis()
41
 
42
- @GPU
43
  def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
44
  """Enhanced video processing with all new features."""
45
  if not video_input:
@@ -60,21 +57,86 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
60
  error_result = {"error": str(e)}
61
  return error_result, None
62
 
63
- def process_video_standard(video, model, enable_viz, include_keypoints):
64
- """Standard video processing function."""
 
65
  if video is None:
66
- return None, None
67
-
68
  try:
69
- json_output, video_output = analyzer.process_video(
70
  video,
71
  model=model,
72
- enable_visualization=enable_viz,
73
  include_keypoints=include_keypoints
74
  )
75
- return json_output, video_output
 
 
 
76
  except Exception as e:
77
- return {"error": str(e)}, None
 
 
78
 
79
  # ── 4. Build UI ─────────────────────────────────────────────────
80
  def create_demo() -> gr.Blocks:
@@ -83,18 +145,18 @@ def create_demo() -> gr.Blocks:
83
  theme='gstaff/sketch',
84
  fill_width=True,
85
  ) as demo:
86
-
87
  # ── Hero banner ──
88
  gr.Markdown(
89
  """
90
- # 🎭 Laban Movement Analysis
91
 
92
  Pose estimation • AI action recognition • Movement Analysis
93
  """
94
  )
95
  with gr.Tabs():
96
  # Tab 1: Standard Analysis
97
- with gr.Tab("🎬 Standard Analysis"):
98
  gr.Markdown("""
99
  ### Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
100
  """)
@@ -131,12 +193,12 @@ def create_demo() -> gr.Blocks:
131
  )
132
 
133
  with gr.Accordion("Analysis Options", open=False):
134
- enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
135
- include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
136
 
137
  gr.Examples(
138
  examples=[
139
- ["src/examples/balette.mp4"],
140
  ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
141
  ["https://vimeo.com/815392738"],
142
  ["https://vimeo.com/548964931"],
@@ -157,7 +219,9 @@ def create_demo() -> gr.Blocks:
157
  def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
158
  """Process either file upload or URL input."""
159
  video_source = file_input if file_input else url_input
160
- return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
 
 
161
 
162
  analyze_btn_enh.click(
163
  fn=process_enhanced_input,
@@ -166,18 +230,295 @@ def create_demo() -> gr.Blocks:
166
  api_name="analyze_enhanced"
167
  )
168
 
 
169
  # Footer
170
  with gr.Row():
171
  gr.Markdown(
172
  """
173
  **Built by Csaba Bolyós**
174
- [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
175
  """
176
  )
177
  return demo
178
-
 
179
  if __name__ == "__main__":
180
  demo = create_demo()
181
  demo.launch(server_name="0.0.0.0",
 
182
  server_port=int(os.getenv("PORT", 7860)),
183
  mcp_server=True)
 
3
  Laban Movement Analysis – modernised Gradio Space
4
  Author: Csaba (BladeSzaSza)
5
  """
 
6
  import gradio as gr
7
  import os
8
+ from pathlib import Path
9
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
10
+ from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
11
+ from gradio_overlay_video import OverlayVideo
12
 
13
  # Import agent API if available
14
  # Initialize agent API if available
 
20
  MovementDirection,
21
  MovementIntensity
22
  )
23
+ agent_api = LabanAgentAPI()
24
  HAS_AGENT_API = True
25
+ except Exception as e:
26
+ print(f"Warning: Agent API not available: {e}")
27
+ agent_api = None
 
 
 
 
28
  HAS_AGENT_API = False
29
  # Initialize components
30
  try:
 
36
  print(f"Warning: Some features may not be available: {e}")
37
  analyzer = LabanMovementAnalysis()
38
 
39
+
40
  def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
41
  """Enhanced video processing with all new features."""
42
  if not video_input:
 
57
  error_result = {"error": str(e)}
58
  return error_result, None
59
 
60
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
61
+ """
62
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
63
+
64
+ Args:
65
+ video (str): Path to the video file to be analyzed.
66
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
67
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
68
+
69
+ Returns:
70
+ dict:
71
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
72
+
73
+
74
+ Notes:
75
+ - Visualization is disabled in this standard processing function.
76
+ - If the input video is None, None is returned.
77
+ - If an error occurs during processing, a dictionary with an "error" key is returned.
78
+ """
79
  if video is None:
80
+ return None
 
81
  try:
82
+ json_output, _ = analyzer.process_video(
83
  video,
84
  model=model,
85
+ enable_visualization=False,
86
  include_keypoints=include_keypoints
87
  )
88
+ return json_output
89
+ except (RuntimeError, ValueError, OSError) as e:
90
+ return {"error": str(e)}
91
+
92
+ def process_video_for_agent(video, model, output_format="summary"):
93
+ """Process video with agent-friendly output format."""
94
+ if not HAS_AGENT_API or agent_api is None:
95
+ return {"error": "Agent API not available"}
96
+
97
+ if not video:
98
+ return {"error": "No video provided"}
99
+
100
+ try:
101
+ model_enum = PoseModel(model)
102
+ result = agent_api.analyze(video, model=model_enum, generate_visualization=False)
103
+
104
+ if output_format == "summary":
105
+ return {"summary": agent_api.get_movement_summary(result)}
106
+ elif output_format == "structured":
107
+ return {
108
+ "success": result.success,
109
+ "direction": result.dominant_direction.value,
110
+ "intensity": result.dominant_intensity.value,
111
+ "speed": result.dominant_speed,
112
+ "fluidity": result.fluidity_score,
113
+ "expansion": result.expansion_score,
114
+ "segments": len(result.movement_segments)
115
+ }
116
+ else: # json
117
+ return result.raw_data
118
  except Exception as e:
119
+ return {"error": str(e)}
120
+
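# Illustrative sketch (assumes the agent API initialised above is available):
# process_video_for_agent() returns a different shape per output_format —
#   "summary"     -> {"summary": <narrative from agent_api.get_movement_summary()>}
#   "structured"  -> {"success", "direction", "intensity", "speed",
#                     "fluidity", "expansion", "segments"}
#   anything else -> result.raw_data (full analysis payload)
# Example:
#   out = process_video_for_agent("examples/balette.mp4", "mediapipe-full",
#                                 output_format="structured")
#   print(out.get("direction"), out.get("intensity"))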
121
+ # Batch processing removed due to MediaPipe compatibility issues
122
+
123
+ # process_standard_for_agent is now imported from backend
124
+
125
+ # Movement filtering removed due to MediaPipe compatibility issues
126
+
127
+ # Import agentic analysis functions from backend
128
+ try:
129
+ from gradio_labanmovementanalysis.agentic_analysis import (
130
+ generate_agentic_analysis,
131
+ process_standard_for_agent
132
+ )
133
+ except ImportError:
134
+ # Fallback if backend module is not available
135
+ def generate_agentic_analysis(json_data, analysis_type, filter_direction="any", filter_intensity="any", filter_min_fluidity=0.0, filter_min_expansion=0.0):
136
+ return {"error": "Agentic analysis backend not available"}
137
+
138
+ def process_standard_for_agent(json_data, output_format="summary"):
139
+ return {"error": "Agent conversion backend not available"}
140
 
141
  # ── 4. Build UI ─────────────────────────────────────────────────
142
  def create_demo() -> gr.Blocks:
 
145
  theme='gstaff/sketch',
146
  fill_width=True,
147
  ) as demo:
148
+ # gr.api(process_video_standard, api_name="process_video")
149
  # ── Hero banner ──
150
  gr.Markdown(
151
  """
152
+ # 🩰 Laban Movement Analysis
153
 
154
  Pose estimation • AI action recognition • Movement Analysis
155
  """
156
  )
157
  with gr.Tabs():
158
  # Tab 1: Standard Analysis
159
+ with gr.Tab("🎭 Standard Analysis"):
160
  gr.Markdown("""
161
  ### Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
162
  """)
 
193
  )
194
 
195
  with gr.Accordion("Analysis Options", open=False):
196
+ enable_viz = gr.Radio([("Create", 1), ("Dismiss", 0)], value=1, label="Visualization")
197
+ include_kp = gr.Radio([("Include", 1), ("Exclude", 0)], value=1, label="Raw Keypoints")
198
 
199
  gr.Examples(
200
  examples=[
201
+ ["examples/balette.mp4"],
202
  ["https://www.youtube.com/shorts/RX9kH2l3L8U"],
203
  ["https://vimeo.com/815392738"],
204
  ["https://vimeo.com/548964931"],
 
219
  def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
220
  """Process either file upload or URL input."""
221
  video_source = file_input if file_input else url_input
222
+ [json_out, viz_out] = process_video_enhanced(video_source, model, enable_viz, include_keypoints)
223
+ overlay_video.value = (None, json_out)
224
+ return [json_out, viz_out]
225
 
226
  analyze_btn_enh.click(
227
  fn=process_enhanced_input,
 
230
  api_name="analyze_enhanced"
231
  )
232
 
233
+ with gr.Tab("🎬 Overlayed Visualisation"):
234
+ gr.Markdown(
235
+ "# 🩰 Interactive Pose Visualization\n"
236
+ "## See the movement analysis in action with an interactive overlay. "
237
+ "Analyze video @ 🎬 Standard Analysis tab"
238
+ )
239
+ with gr.Row(equal_height=True, min_height=240):
240
+ with gr.Column(scale=1):
241
+ overlay_video = OverlayVideo(
242
+ value=(None, json_out),
243
+ autoplay=True,
244
+ interactive=False
245
+ )
246
+
247
+
248
+ # Update overlay when JSON changes
249
+ def update_overlay(json_source):
250
+ """Update overlay video with JSON data from analysis or upload."""
251
+ if json_source:
252
+ return OverlayVideo(value=("", json_source), autoplay=True, interactive=False)
253
+ return OverlayVideo(value=("", None), autoplay=True, interactive=False)
254
+
255
+ # Connect JSON output from analysis to overlay
256
+ json_out.change(
257
+ fn=update_overlay,
258
+ inputs=[json_out],
259
+ outputs=[overlay_video]
260
+ )
261
+
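# Note on the value convention used here: OverlayVideo takes a (video, json)
# tuple. The first element is the video source (None or "" when only the layered
# pose/analysis data should be shown) and the second is the analysis JSON from
# the Standard Analysis tab, so update_overlay() above re-renders the overlay
# whenever json_out changes.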
262
+ # Tab 3: Agentic Analysis
263
+ with gr.Tab("🤖 Agentic Analysis"):
264
+ gr.Markdown("""
265
+ ### Intelligent Movement Interpretation
266
+ AI-powered analysis using the processed data from the Standard Analysis tab.
267
+ """)
268
+
269
+ with gr.Row(equal_height=True):
270
+ # Left column - Video display (sourced from first tab)
271
+ with gr.Column(scale=1, min_width=400):
272
+ gr.Markdown("**Source Video** *(from Standard Analysis)*")
273
+ agentic_video_display = gr.Video(
274
+ label="Analyzed Video",
275
+ interactive=False,
276
+ height=350
277
+ )
278
+
279
+ # Model info display (sourced from first tab)
280
+ gr.Markdown("**Model Used** *(from Standard Analysis)*")
281
+ agentic_model_display = gr.Textbox(
282
+ label="Pose Model",
283
+ interactive=False,
284
+ value="No analysis completed yet"
285
+ )
286
+
287
+ # Right column - Analysis options and output
288
+ with gr.Column(scale=1, min_width=400):
289
+ gr.Markdown("**Analysis Type**")
290
+ agentic_analysis_type = gr.Radio(
291
+ choices=[
292
+ ("🎯 SUMMARY", "summary"),
293
+ ("📊 STRUCTURED", "structured"),
294
+ ("🔍 MOVEMENT FILTERS", "movement_filters")
295
+ ],
296
+ value="summary",
297
+ label="Choose Analysis",
298
+ info="Select the type of intelligent analysis"
299
+ )
300
+
301
+ # Movement filters options (shown when movement_filters is selected)
302
+ with gr.Group(visible=False) as movement_filter_options:
303
+ gr.Markdown("**Filter Criteria**")
304
+ filter_direction = gr.Dropdown(
305
+ choices=["any", "up", "down", "left", "right", "forward", "backward", "stationary"],
306
+ value="any",
307
+ label="Dominant Direction"
308
+ )
309
+ filter_intensity = gr.Dropdown(
310
+ choices=["any", "low", "medium", "high"],
311
+ value="any",
312
+ label="Movement Intensity"
313
+ )
314
+ filter_min_fluidity = gr.Slider(0.0, 1.0, 0.0, label="Minimum Fluidity Score")
315
+ filter_min_expansion = gr.Slider(0.0, 1.0, 0.0, label="Minimum Expansion Score")
316
+
317
+ analyze_agentic_btn = gr.Button("🚀 Generate Analysis", variant="primary", size="lg")
318
+
319
+ # Output display
320
+ with gr.Accordion("Analysis Results", open=True):
321
+ agentic_output = gr.JSON(label="Intelligent Analysis Results")
322
+
323
+ # Show/hide movement filter options based on selection
324
+ def toggle_filter_options(analysis_type):
325
+ return gr.Group(visible=(analysis_type == "movement_filters"))
326
+
327
+ agentic_analysis_type.change(
328
+ fn=toggle_filter_options,
329
+ inputs=[agentic_analysis_type],
330
+ outputs=[movement_filter_options]
331
+ )
332
+
333
+ # Update video display when standard analysis completes
334
+ def update_agentic_video_display(video_input, url_input, model):
335
+ """Update agentic tab with video and model from standard analysis."""
336
+ video_source = video_input if video_input else url_input
337
+ return video_source, f"Model: {model}"
338
+
339
+ # Link to standard analysis inputs
340
+ video_in.change(
341
+ fn=update_agentic_video_display,
342
+ inputs=[video_in, url_input_enh, model_sel],
343
+ outputs=[agentic_video_display, agentic_model_display]
344
+ )
345
+
346
+ url_input_enh.change(
347
+ fn=update_agentic_video_display,
348
+ inputs=[video_in, url_input_enh, model_sel],
349
+ outputs=[agentic_video_display, agentic_model_display]
350
+ )
351
+
352
+ model_sel.change(
353
+ fn=update_agentic_video_display,
354
+ inputs=[video_in, url_input_enh, model_sel],
355
+ outputs=[agentic_video_display, agentic_model_display]
356
+ )
357
+
358
+ # Hook up the Generate Analysis button
359
+ def process_agentic_analysis(json_data, analysis_type, filter_direction, filter_intensity, filter_min_fluidity, filter_min_expansion):
360
+ """Process agentic analysis based on user selection."""
361
+ return generate_agentic_analysis(
362
+ json_data,
363
+ analysis_type,
364
+ filter_direction,
365
+ filter_intensity,
366
+ filter_min_fluidity,
367
+ filter_min_expansion
368
+ )
369
+
370
+ analyze_agentic_btn.click(
371
+ fn=process_agentic_analysis,
372
+ inputs=[
373
+ json_out, # JSON data from standard analysis
374
+ agentic_analysis_type,
375
+ filter_direction,
376
+ filter_intensity,
377
+ filter_min_fluidity,
378
+ filter_min_expansion
379
+ ],
380
+ outputs=[agentic_output],
381
+ api_name="analyze_agentic"
382
+ )
383
+
384
+ # Auto-update agentic analysis when JSON changes and analysis type is summary
385
+ def auto_update_summary(json_data, analysis_type):
386
+ """Auto-update with summary when new analysis is available."""
387
+ if json_data and analysis_type == "summary":
388
+ return generate_agentic_analysis(json_data, "summary")
389
+ return None
390
+
391
+ json_out.change(
392
+ fn=auto_update_summary,
393
+ inputs=[json_out, agentic_analysis_type],
394
+ outputs=[agentic_output]
395
+ )
396
+
397
+ # Tab 4: About
398
+ with gr.Tab("ℹ️ About"):
399
+ gr.Markdown("""
400
+ # 🩰 Developer Journey: Laban Movement Analysis
401
+
402
+ ## 🎯 Project Vision
403
+
404
+ Created to bridge the gap between traditional **Laban Movement Analysis (LMA)** principles and modern **AI-powered pose estimation**, this platform represents a comprehensive approach to understanding human movement through technology.
405
+
406
+ ## 🛠️ Technical Architecture
407
+
408
+ ### **Core Foundation**
409
+ - **15 Pose Estimation Models** from diverse sources and frameworks
410
+ - **Multi-format Video Processing** with URL support (YouTube, Vimeo, direct links)
411
+ - **Real-time Analysis Pipeline** with configurable model selection
412
+ - **MCP-Compatible API** for AI agent integration
413
+
414
+ ### **Pose Model Ecosystem**
415
+ ```
416
+ 📊 MediaPipe Family (Google) → 3 variants (lite/full/heavy)
417
+ ⚡ MoveNet Family (TensorFlow) → 2 variants (lightning/thunder)
418
+ 🎯 YOLO v8 Family (Ultralytics) → 5 variants (n/s/m/l/x)
419
+ 🔥 YOLO v11 Family (Ultralytics)→ 5 variants (n/s/m/l/x)
420
+ ```
421
+
422
+ ## 🎨 Innovation Highlights
423
+
424
+ ### **1. Custom Gradio Component: `gradio_overlay_video`**
425
+ - **Layered Visualization**: Controlled overlay of pose data on original video
426
+ - **Interactive Controls**: Frame-by-frame analysis with movement metrics
427
+ - **Synchronized Playback**: Real-time correlation between video and data
428
+
429
+ ### **2. Agentic Analysis Engine**
430
+ Beyond raw pose detection, we've developed intelligent interpretation layers:
431
+
432
+ - **🎯 SUMMARY**: Narrative movement interpretation with temporal pattern analysis
433
+ - **📊 STRUCTURED**: Comprehensive quantitative breakdowns with statistical insights
434
+ - **🔍 MOVEMENT FILTERS**: Advanced pattern detection with customizable criteria
435
+
436
+ ### **3. Temporal Pattern Recognition**
437
+ - **Movement Consistency Tracking**: Direction and intensity variation analysis
438
+ - **Complexity Scoring**: Multi-dimensional movement sophistication metrics
439
+ - **Sequence Detection**: Continuous movement pattern identification
440
+ - **Laban Integration**: Professional movement quality assessment using LMA principles
441
+
442
+ ## 📈 Processing Pipeline
443
+
444
+ ```mermaid
445
+ Video Input → Pose Detection → LMA Analysis → JSON Output
446
+ ↓ ↓ ↓ ↓
447
+ URL/Upload → 15 Models → Temporal → Visualization
448
+ ↓ ↓ Patterns ↓
449
+ Preprocessing → Keypoints → Metrics → Agentic Analysis
450
+ ```
451
+
452
+ ## 🎭 Laban Movement Analysis Integration
453
+
454
+ Our implementation translates raw pose coordinates into meaningful movement qualities:
455
+
456
+ - **Effort Qualities**: Intensity, speed, and flow characteristics
457
+ - **Space Usage**: Expansion patterns and directional preferences
458
+ - **Temporal Dynamics**: Rhythm, acceleration, and movement consistency
459
+ - **Quality Assessment**: Fluidity scores and movement sophistication
460
+
461
+ ## 🔬 Technical Achievements
462
+
463
+ ### **Multi-Source Model Integration**
464
+ Successfully unified models from different frameworks:
465
+ - Google's MediaPipe (BlazePose architecture)
466
+ - TensorFlow's MoveNet (lightweight and accurate variants)
467
+ - Ultralytics' YOLO ecosystem (object detection adapted for pose)
468
+
469
+ ### **Real-Time Processing Capabilities**
470
+ - **Streaming Support**: Frame-by-frame processing with temporal continuity
471
+ - **Memory Optimization**: Efficient handling of large video files
472
+ - **Error Recovery**: Graceful handling of pose detection failures
473
+
474
+ ### **Agent-Ready Architecture**
475
+ - **MCP Server Integration**: Compatible with AI agent workflows
476
+ - **Structured API**: RESTful endpoints for programmatic access
477
+ - **Flexible Output Formats**: JSON, visualization videos, and metadata
478
+
479
+ ## 🌟 Future Roadmap
480
+
481
+ - **3D Pose Integration**: Depth-aware movement analysis
482
+ - **Multi-Person Tracking**: Ensemble and group movement dynamics
483
+ - **Real-Time Streaming**: Live movement analysis capabilities
484
+ - **Machine Learning Enhancement**: Custom models trained on movement data
485
+
486
+ ## 🔧 Built With
487
+
488
+ - **Frontend**: Gradio 5.33+ with custom Svelte components
489
+ - **Backend**: Python with FastAPI and async processing
490
+ - **Computer Vision**: MediaPipe, TensorFlow, PyTorch, Ultralytics
491
+ - **Analysis**: NumPy, OpenCV, custom Laban algorithms
492
+ - **Deployment**: Hugging Face Spaces with Docker support
493
+
494
+ ---
495
+
496
+ ### 👨‍💻 Created by **Csaba Bolyós**
497
+
498
+ *Combining classical movement analysis with cutting-edge AI to unlock new possibilities in human movement understanding.*
499
+
500
+ **Connect:**
501
+ [GitHub](https://github.com/bladeszasza) • [Hugging Face](https://huggingface.co/BladeSzaSza) • [LinkedIn](https://www.linkedin.com/in/csaba-bolyós-00a11767/)
502
+
503
+ ---
504
+
505
+ > *"Movement is a language. Technology helps us understand what the body is saying."*
506
+ """)
507
+
508
  # Footer
509
  with gr.Row():
510
  gr.Markdown(
511
  """
512
  **Built by Csaba Bolyós**
513
+ [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza) • [LinkedIn](https://www.linkedin.com/in/csaba-bolyós-00a11767/)
514
  """
515
  )
516
  return demo
517
+
518
+
519
  if __name__ == "__main__":
520
  demo = create_demo()
521
  demo.launch(server_name="0.0.0.0",
522
+ share=True,
523
  server_port=int(os.getenv("PORT", 7860)),
524
  mcp_server=True)
space.py CHANGED
@@ -46,8 +46,8 @@ Author: Csaba (BladeSzaSza)
46
 
47
  import gradio as gr
48
  import os
49
- from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
@@ -59,15 +59,10 @@ try:
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
- HAS_AGENT_API = True
63
-
64
- try:
65
- agent_api = LabanAgentAPI()
66
- except Exception as e:
67
- print(f"Warning: Agent API not available: {e}")
68
- agent_api = None
69
- except ImportError:
70
- HAS_AGENT_API = False
71
  # Initialize components
72
  try:
73
  analyzer = LabanMovementAnalysis(
@@ -99,21 +94,37 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
99
  error_result = {"error": str(e)}
100
  return error_result, None
101
 
102
- def process_video_standard(video, model, enable_viz, include_keypoints):
103
- \"\"\"Standard video processing function.\"\"\"
 
 
 
 
104
  if video is None:
105
- return None, None
106
-
107
  try:
108
- json_output, video_output = analyzer.process_video(
109
  video,
110
  model=model,
111
- enable_visualization=enable_viz,
112
  include_keypoints=include_keypoints
113
  )
114
- return json_output, video_output
115
- except Exception as e:
116
- return {"error": str(e)}, None
117
 
118
  # ── 4. Build UI ─────────────────────────────────────────────────
119
  def create_demo() -> gr.Blocks:
@@ -122,7 +133,7 @@ def create_demo() -> gr.Blocks:
122
  theme='gstaff/sketch',
123
  fill_width=True,
124
  ) as demo:
125
-
126
  # ── Hero banner ──
127
  gr.Markdown(
128
  \"\"\"
@@ -214,10 +225,12 @@ def create_demo() -> gr.Blocks:
214
  \"\"\"
215
  )
216
  return demo
217
-
 
218
  if __name__ == "__main__":
219
  demo = create_demo()
220
  demo.launch(server_name="0.0.0.0",
 
221
  server_port=int(os.getenv("PORT", 7860)),
222
  mcp_server=True)
223
 
 
46
 
47
  import gradio as gr
48
  import os
49
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
 
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
+ agent_api = LabanAgentAPI()
63
+ except Exception as e:
64
+ print(f"Warning: Agent API not available: {e}")
65
+ agent_api = None
 
 
 
 
 
66
  # Initialize components
67
  try:
68
  analyzer = LabanMovementAnalysis(
 
94
  error_result = {"error": str(e)}
95
  return error_result, None
96
 
97
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
98
+ \"\"\"
99
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
100
+
101
+ Args:
102
+ video (str): Path to the video file to be analyzed.
103
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
104
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
105
+
106
+ Returns:
107
+ dict:
108
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
109
+
110
+
111
+ Notes:
112
+ - Visualization is disabled in this standard processing function.
113
+ - If the input video is None, None is returned.
114
+ - If an error occurs during processing, a dictionary with an "error" key is returned.
115
+ \"\"\"
116
  if video is None:
117
+ return None
 
118
  try:
119
+ json_output, _ = analyzer.process_video(
120
  video,
121
  model=model,
122
+ enable_visualization=False,
123
  include_keypoints=include_keypoints
124
  )
125
+ return json_output
126
+ except (RuntimeError, ValueError, OSError) as e:
127
+ return {"error": str(e)}
128
 
129
  # ── 4. Build UI ─────────────────────────────────────────────────
130
  def create_demo() -> gr.Blocks:
 
133
  theme='gstaff/sketch',
134
  fill_width=True,
135
  ) as demo:
136
+ gr.api(process_video_standard, api_name="process_video")
137
  # ── Hero banner ──
138
  gr.Markdown(
139
  \"\"\"
 
225
  \"\"\"
226
  )
227
  return demo
228
+
229
+
230
  if __name__ == "__main__":
231
  demo = create_demo()
232
  demo.launch(server_name="0.0.0.0",
233
+ share=True,
234
  server_port=int(os.getenv("PORT", 7860)),
235
  mcp_server=True)
236
 
src/.claude/settings.local.json ADDED
@@ -0,0 +1,11 @@
 
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"smooth_metrics\" /Users/csabi/Develop/Laban-Movement-Analysis/backend/gradio_labanmovementanalysis/notation_engine.py)",
5
+ "Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"_smooth_metrics\" /Users/csabi/Develop/Laban-Movement-Analysis/backend/gradio_labanmovementanalysis/notation_engine.py)",
6
+ "Bash(/Users/csabi/.nvm/versions/node/v18.20.5/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -n \"# Footer\" /Users/csabi/Develop/Laban-Movement-Analysis/demo/app.py)",
7
+ "Bash(find:*)"
8
+ ],
9
+ "deny": []
10
+ }
11
+ }
src/.gitignore CHANGED
@@ -327,4 +327,7 @@ tb_logs/
327
  outputs/
328
  .hydra/
329
 
330
- .working/
 
 
 
 
327
  outputs/
328
  .hydra/
329
 
330
+ .working/
331
+
332
+ # Claude Code project instructions
333
+ CLAUDE.md
src/.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
src/README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  title: Laban Movement Analysis
3
- emoji: 🏃
4
  colorFrom: purple
5
  colorTo: green
6
  app_file: app.py
@@ -20,14 +20,43 @@ tags:
20
  - mediapipe
21
  - yolo
22
  - gradio
23
- short_description: Professional movement analysis with pose estimation and AI
 
 
 
24
  license: apache-2.0
25
  ---
26
 
27
- # `gradio_labanmovementanalysis`
28
  <a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
29
 
30
- A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents
 
 
31
 
32
  ## Installation
33
 
@@ -46,8 +75,8 @@ Author: Csaba (BladeSzaSza)
46
 
47
  import gradio as gr
48
  import os
49
- from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
@@ -59,15 +88,10 @@ try:
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
- HAS_AGENT_API = True
63
-
64
- try:
65
- agent_api = LabanAgentAPI()
66
- except Exception as e:
67
- print(f"Warning: Agent API not available: {e}")
68
- agent_api = None
69
- except ImportError:
70
- HAS_AGENT_API = False
71
  # Initialize components
72
  try:
73
  analyzer = LabanMovementAnalysis(
@@ -99,21 +123,38 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
99
  error_result = {"error": str(e)}
100
  return error_result, None
101
 
102
- def process_video_standard(video, model, enable_viz, include_keypoints):
103
- """Standard video processing function."""
 
 
104
  if video is None:
105
- return None, None
106
-
107
  try:
108
- json_output, video_output = analyzer.process_video(
109
  video,
110
  model=model,
111
- enable_visualization=enable_viz,
112
  include_keypoints=include_keypoints
113
  )
114
- return json_output, video_output
115
- except Exception as e:
116
- return {"error": str(e)}, None
 
 
117
 
118
  # ── 4. Build UI ─────────────────────────────────────────────
119
  def create_demo() -> gr.Blocks:
@@ -122,7 +163,7 @@ def create_demo() -> gr.Blocks:
122
  theme='gstaff/sketch',
123
  fill_width=True,
124
  ) as demo:
125
-
126
  # ── Hero banner ──
127
  gr.Markdown(
128
  """
@@ -214,10 +255,15 @@ def create_demo() -> gr.Blocks:
214
  """
215
  )
216
  return demo
217
-
 
 
 
 
218
  if __name__ == "__main__":
219
  demo = create_demo()
220
  demo.launch(server_name="0.0.0.0",
 
221
  server_port=int(os.getenv("PORT", 7860)),
222
  mcp_server=True)
223
 
 
1
  ---
2
  title: Laban Movement Analysis
3
+ emoji: 🩰
4
  colorFrom: purple
5
  colorTo: green
6
  app_file: app.py
 
20
  - mediapipe
21
  - yolo
22
  - gradio
23
+ - agentic-analysis
24
+ - overlay-video
25
+ - temporal-patterns
26
+ short_description: Laban Movement Analysis (LMA) from pose estimation
27
  license: apache-2.0
28
  ---
29
 
30
+ # 🩰 Laban Movement Analysis
31
  <a href="https://pypi.org/project/gradio_labanmovementanalysis/" target="_blank"><img alt="PyPI - Version" src="https://img.shields.io/pypi/v/gradio_labanmovementanalysis"></a>
32
 
33
+ **Advanced video movement analysis platform** combining Laban Movement Analysis (LMA) principles with modern AI pose estimation, intelligent analysis, and interactive visualization.
34
+
35
+ ## 🌟 Key Features
36
+
37
+ ### 📊 **Multi-Model Pose Estimation**
38
+ - **15 different pose estimation models** from multiple sources:
39
+ - **MediaPipe**: `mediapipe-lite`, `mediapipe-full`, `mediapipe-heavy`
40
+ - **MoveNet**: `movenet-lightning`, `movenet-thunder`
41
+ - **YOLO v8**: `yolo-v8-n/s/m/l/x` (5 variants)
42
+ - **YOLO v11**: `yolo-v11-n/s/m/l/x` (5 variants)
43
+
44
+ ### 🎥 **Comprehensive Video Processing**
45
+ - **JSON Analysis Output**: Detailed movement metrics with temporal data
46
+ - **Annotated Video Generation**: Pose overlay with Laban movement data
47
+ - **URL Support**: Direct processing from YouTube, Vimeo, and video URLs
48
+ - **Custom Overlay Component**: `gradio_overlay_video` for controlled layered visualization
49
+
50
+ ### 🤖 **Agentic Intelligence**
51
+ - **SUMMARY Analysis**: Narrative movement interpretation with temporal patterns
52
+ - **STRUCTURED Analysis**: Quantitative breakdowns and statistical insights
53
+ - **MOVEMENT FILTERS**: Pattern detection with intelligent filtering
54
+ - **Laban Interpretation**: Professional movement quality assessment
55
+
56
+ ### 🎨 **Interactive Visualization**
57
+ - **Standard Analysis Tab**: Core pose estimation and LMA processing
58
+ - **Overlay Visualization Tab**: Interactive layered video display
59
+ - **Agentic Analysis Tab**: AI-powered movement insights and filtering
60
 
61
  ## Installation
62
 
 
75
 
76
  import gradio as gr
77
  import os
78
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
79
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
80
 
81
  # Import agent API if available
82
  # Initialize agent API if available
 
88
  MovementDirection,
89
  MovementIntensity
90
  )
91
+ agent_api = LabanAgentAPI()
92
+ except Exception as e:
93
+ print(f"Warning: Agent API not available: {e}")
94
+ agent_api = None
 
 
 
 
 
95
  # Initialize components
96
  try:
97
  analyzer = LabanMovementAnalysis(
 
123
  error_result = {"error": str(e)}
124
  return error_result, None
125
 
126
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
127
+ """
128
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
129
+
130
+ Args:
131
+ video (str): Path to the video file to be analyzed.
132
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
133
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
134
+
135
+ Returns:
136
+ dict:
137
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
138
+
139
+
140
+ Notes:
141
+ - Visualization is disabled in this standard processing function.
142
+ - If the input video is None, None is returned.
143
+ - If an error occurs during processing, a dictionary with an "error" key is returned.
144
+ """
145
  if video is None:
146
+ return None
 
147
  try:
148
+ json_output = analyzer.process(
149
  video,
150
  model=model,
 
151
  include_keypoints=include_keypoints
152
  )
153
+
154
+
155
+ return json_output
156
+ except (RuntimeError, ValueError, OSError) as e:
157
+ return {"error": str(e)}
158
 
159
  # ── 4. Build UI ─────────────────────────────────────────────
160
  def create_demo() -> gr.Blocks:
 
163
  theme='gstaff/sketch',
164
  fill_width=True,
165
  ) as demo:
166
+ # gr.api(process_video_standard, api_name="process_video") # <-- Remove from here
167
  # ── Hero banner ──
168
  gr.Markdown(
169
  """
 
255
  """
256
  )
257
  return demo
258
+
259
+ # Register API endpoint OUTSIDE the UI
260
+
261
+ gr.api(process_video_standard, api_name="process_video")
262
+
263
  if __name__ == "__main__":
264
  demo = create_demo()
265
  demo.launch(server_name="0.0.0.0",
266
+ share=True,
267
  server_port=int(os.getenv("PORT", 7860)),
268
  mcp_server=True)
269
 
src/backend/gradio_labanmovementanalysis/agentic_analysis.py ADDED
@@ -0,0 +1,450 @@
 
 
1
+ """
2
+ Agentic Analysis Module for Laban Movement Analysis
3
+
4
+ This module provides intelligent analysis capabilities that go beyond raw pose detection
5
+ to offer meaningful insights about movement patterns, quality, and characteristics.
6
+ """
7
+
8
+ import numpy as np
9
+ from collections import Counter
10
+ from typing import Dict, List, Any, Optional
11
+
12
+
13
+ def generate_agentic_analysis(
14
+ json_data: Dict[str, Any],
15
+ analysis_type: str,
16
+ filter_direction: str = "any",
17
+ filter_intensity: str = "any",
18
+ filter_min_fluidity: float = 0.0,
19
+ filter_min_expansion: float = 0.0
20
+ ) -> Dict[str, Any]:
21
+ """
22
+ Generate intelligent analysis based on JSON data and selected type.
23
+
24
+ Args:
25
+ json_data: Movement analysis data from pose estimation
26
+ analysis_type: Type of analysis ("summary", "structured", "movement_filters")
27
+ filter_direction: Direction filter for movement_filters analysis
28
+ filter_intensity: Intensity filter for movement_filters analysis
29
+ filter_min_fluidity: Minimum fluidity threshold for movement_filters
30
+ filter_min_expansion: Minimum expansion threshold for movement_filters
31
+
32
+ Returns:
33
+ Dictionary containing analysis results or error information
34
+ """
35
+ if not json_data or "error" in json_data:
36
+ return {"error": "No valid analysis data available. Please run Standard Analysis first."}
37
+
38
+ try:
39
+ # Extract movement data
40
+ if "movement_analysis" not in json_data or "frames" not in json_data["movement_analysis"]:
41
+ return {"error": "Invalid analysis format - missing movement data"}
42
+
43
+ frames = json_data["movement_analysis"]["frames"]
44
+ video_info = json_data.get("video_info", {})
45
+ model_info = json_data.get("analysis_metadata", {}).get("model_info", {})
46
+
47
+ if not frames:
48
+ return {"error": "No movement data found in analysis"}
49
+
50
+ if analysis_type == "summary":
51
+ return generate_summary_analysis(frames, video_info, model_info)
52
+ elif analysis_type == "structured":
53
+ return generate_structured_analysis(frames, video_info, model_info)
54
+ elif analysis_type == "movement_filters":
55
+ return generate_movement_filter_analysis(
56
+ frames, video_info, model_info,
57
+ filter_direction, filter_intensity,
58
+ filter_min_fluidity, filter_min_expansion
59
+ )
60
+ else:
61
+ return {"error": "Unknown analysis type"}
62
+
63
+ except Exception as e:
64
+ return {"error": f"Analysis failed: {str(e)}"}
65
+
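# Illustrative sketch: the JSON this module consumes is roughly shaped as below
# (values are placeholders; keys match the .get() calls used in this file):
#   {
#     "video_info": {"fps": 25, "duration_seconds": 4.0, "width": 1280, "height": 720},
#     "analysis_metadata": {"model_info": {"name": "mediapipe-full"}},
#     "movement_analysis": {"frames": [
#       {"frame_index": 0,
#        "metrics": {"direction": "up", "intensity": "medium", "speed": "fast",
#                    "velocity": 0.12, "acceleration": 0.01,
#                    "fluidity": 0.8, "expansion": 0.6}},
#       ...
#     ]}
#   }
# Typical calls:
#   generate_agentic_analysis(json_data, "summary")
#   generate_agentic_analysis(json_data, "movement_filters",
#                             filter_direction="up", filter_intensity="high",
#                             filter_min_fluidity=0.5)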
66
+
67
+ def generate_summary_analysis(frames: List[Dict], video_info: Dict, model_info: Dict) -> Dict[str, Any]:
68
+ """
69
+ Generate a comprehensive movement summary with temporal patterns.
70
+
71
+ Args:
72
+ frames: List of frame analysis data
73
+ video_info: Video metadata
74
+ model_info: Model information
75
+
76
+ Returns:
77
+ Dictionary containing summary analysis with narrative interpretation
78
+ """
79
+ # Extract metrics
80
+ directions = [f.get("metrics", {}).get("direction", "stationary") for f in frames]
81
+ intensities = [f.get("metrics", {}).get("intensity", "low") for f in frames]
82
+ speeds = [f.get("metrics", {}).get("speed", "slow") for f in frames]
83
+ velocities = [f.get("metrics", {}).get("velocity", 0) for f in frames]
84
+ accelerations = [f.get("metrics", {}).get("acceleration", 0) for f in frames]
85
+ fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in frames]
86
+ expansions = [f.get("metrics", {}).get("expansion", 0) for f in frames]
87
+
88
+ # Calculate dominant characteristics
89
+ dominant_direction = Counter(directions).most_common(1)[0][0]
90
+ dominant_intensity = Counter(intensities).most_common(1)[0][0]
91
+ dominant_speed = Counter(speeds).most_common(1)[0][0]
92
+
93
+ # Calculate temporal patterns
94
+ direction_changes = sum(1 for i in range(1, len(directions)) if directions[i] != directions[i-1])
95
+ intensity_changes = sum(1 for i in range(1, len(intensities)) if intensities[i] != intensities[i-1])
96
+
97
+ # Statistical analysis
98
+ avg_velocity = np.mean(velocities) if velocities else 0
99
+ max_velocity = np.max(velocities) if velocities else 0
100
+ avg_acceleration = np.mean(accelerations) if accelerations else 0
101
+ avg_fluidity = np.mean(fluidities) if fluidities else 0
102
+ avg_expansion = np.mean(expansions) if expansions else 0
103
+
104
+ # Movement complexity score
105
+ complexity_score = (direction_changes / len(frames)) * 0.4 + (intensity_changes / len(frames)) * 0.3 + (avg_fluidity * 0.3)
106
+
107
+ # Generate narrative summary
108
+ duration = video_info.get("duration_seconds", len(frames) / video_info.get("fps", 25))
109
+
110
+ return {
111
+ "analysis_type": "Movement Summary",
112
+ "model_used": model_info.get("name", "unknown"),
113
+ "video_duration": f"{duration:.1f} seconds",
114
+ "total_frames": len(frames),
115
+
116
+ "dominant_characteristics": {
117
+ "primary_direction": dominant_direction,
118
+ "primary_intensity": dominant_intensity,
119
+ "primary_speed": dominant_speed
120
+ },
121
+
122
+ "temporal_patterns": {
123
+ "direction_transitions": direction_changes,
124
+ "intensity_variations": intensity_changes,
125
+ "movement_consistency": f"{(1 - direction_changes/len(frames))*100:.1f}%",
126
+ "complexity_score": f"{complexity_score:.3f}"
127
+ },
128
+
129
+ "movement_quality": {
130
+ "average_fluidity": f"{avg_fluidity:.3f}",
131
+ "average_expansion": f"{avg_expansion:.3f}",
132
+ "peak_velocity": f"{max_velocity:.3f}",
133
+ "average_velocity": f"{avg_velocity:.3f}"
134
+ },
135
+
136
+ "narrative_summary": f"This {duration:.1f}-second movement sequence shows predominantly {dominant_direction} movement with {dominant_intensity} intensity. "
137
+ f"The performer demonstrates {direction_changes} directional changes and {intensity_changes} intensity variations, "
138
+ f"indicating a {'complex' if complexity_score > 0.3 else 'simple'} movement pattern. "
139
+ f"Movement quality shows {avg_fluidity:.2f} fluidity and {avg_expansion:.2f} spatial expansion, "
140
+ f"suggesting {'expressive' if avg_expansion > 0.5 else 'contained'} movement vocabulary.",
141
+
142
+ "laban_interpretation": {
143
+ "effort_qualities": f"Primary effort: {dominant_intensity} intensity with {dominant_speed} timing",
144
+ "space_usage": f"{'Expansive' if avg_expansion > 0.5 else 'Contracted'} spatial patterns",
145
+ "flow_quality": f"{'Bound' if avg_fluidity < 0.3 else 'Free' if avg_fluidity > 0.7 else 'Balanced'} flow"
146
+ }
147
+ }
148
+
149
+
150
+ def generate_structured_analysis(frames: List[Dict], video_info: Dict, model_info: Dict) -> Dict[str, Any]:
151
+ """
152
+ Generate detailed structured analysis with metrics breakdown.
153
+
154
+ Args:
155
+ frames: List of frame analysis data
156
+ video_info: Video metadata
157
+ model_info: Model information
158
+
159
+ Returns:
160
+ Dictionary containing detailed quantitative analysis
161
+ """
162
+ # Extract all metrics
163
+ directions = [f.get("metrics", {}).get("direction", "stationary") for f in frames]
164
+ intensities = [f.get("metrics", {}).get("intensity", "low") for f in frames]
165
+ speeds = [f.get("metrics", {}).get("speed", "slow") for f in frames]
166
+ velocities = [f.get("metrics", {}).get("velocity", 0) for f in frames]
167
+ accelerations = [f.get("metrics", {}).get("acceleration", 0) for f in frames]
168
+ fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in frames]
169
+ expansions = [f.get("metrics", {}).get("expansion", 0) for f in frames]
170
+
171
+ # Direction analysis
172
+ direction_stats = Counter(directions)
173
+ direction_percentages = {k: (v/len(frames))*100 for k, v in direction_stats.items()}
174
+
175
+ # Intensity analysis
176
+ intensity_stats = Counter(intensities)
177
+ intensity_percentages = {k: (v/len(frames))*100 for k, v in intensity_stats.items()}
178
+
179
+ # Speed analysis
180
+ speed_stats = Counter(speeds)
181
+ speed_percentages = {k: (v/len(frames))*100 for k, v in speed_stats.items()}
182
+
183
+ # Temporal segmentation (divide into segments)
184
+ segment_size = max(1, len(frames) // 8) # 8 segments
185
+ segments = []
186
+ for i in range(0, len(frames), segment_size):
187
+ segment_frames = frames[i:i+segment_size]
188
+ if segment_frames:
189
+ seg_directions = [f.get("metrics", {}).get("direction", "stationary") for f in segment_frames]
190
+ seg_intensities = [f.get("metrics", {}).get("intensity", "low") for f in segment_frames]
191
+ seg_fluidities = [f.get("metrics", {}).get("fluidity", 0) for f in segment_frames]
192
+
193
+ segments.append({
194
+ "segment_index": len(segments) + 1,
195
+ "time_range": f"{i/video_info.get('fps', 25):.1f}-{(i+len(segment_frames))/video_info.get('fps', 25):.1f}s",
196
+ "dominant_direction": Counter(seg_directions).most_common(1)[0][0],
197
+ "dominant_intensity": Counter(seg_intensities).most_common(1)[0][0],
198
+ "average_fluidity": np.mean(seg_fluidities) if seg_fluidities else 0
199
+ })
200
+
201
+ return {
202
+ "analysis_type": "Structured Analysis",
203
+ "model_used": model_info.get("name", "unknown"),
204
+ "video_info": {
205
+ "duration": video_info.get("duration_seconds", 0),
206
+ "fps": video_info.get("fps", 0),
207
+ "resolution": f"{video_info.get('width', 0)}x{video_info.get('height', 0)}",
208
+ "total_frames": len(frames)
209
+ },
210
+
211
+ "direction_breakdown": {
212
+ "statistics": dict(direction_stats),
213
+ "percentages": {k: f"{v:.1f}%" for k, v in direction_percentages.items()},
214
+ "most_common": direction_stats.most_common(3)
215
+ },
216
+
217
+ "intensity_breakdown": {
218
+ "statistics": dict(intensity_stats),
219
+ "percentages": {k: f"{v:.1f}%" for k, v in intensity_percentages.items()},
220
+ "distribution": intensity_stats.most_common()
221
+ },
222
+
223
+ "speed_breakdown": {
224
+ "statistics": dict(speed_stats),
225
+ "percentages": {k: f"{v:.1f}%" for k, v in speed_percentages.items()},
226
+ "distribution": speed_stats.most_common()
227
+ },
228
+
229
+ "quantitative_metrics": {
230
+ "velocity": {
231
+ "mean": f"{np.mean(velocities):.4f}",
232
+ "std": f"{np.std(velocities):.4f}",
233
+ "min": f"{np.min(velocities):.4f}",
234
+ "max": f"{np.max(velocities):.4f}"
235
+ },
236
+ "acceleration": {
237
+ "mean": f"{np.mean(accelerations):.4f}",
238
+ "std": f"{np.std(accelerations):.4f}",
239
+ "min": f"{np.min(accelerations):.4f}",
240
+ "max": f"{np.max(accelerations):.4f}"
241
+ },
242
+ "fluidity": {
243
+ "mean": f"{np.mean(fluidities):.4f}",
244
+ "std": f"{np.std(fluidities):.4f}",
245
+ "min": f"{np.min(fluidities):.4f}",
246
+ "max": f"{np.max(fluidities):.4f}"
247
+ },
248
+ "expansion": {
249
+ "mean": f"{np.mean(expansions):.4f}",
250
+ "std": f"{np.std(expansions):.4f}",
251
+ "min": f"{np.min(expansions):.4f}",
252
+ "max": f"{np.max(expansions):.4f}"
253
+ }
254
+ },
255
+
256
+ "temporal_segments": segments,
257
+
258
+ "movement_patterns": {
259
+ "consistency_index": f"{1 - (len(set(directions))/len(frames)):.3f}",
260
+ "dynamic_range": f"{np.max(velocities) - np.min(velocities):.4f}",
261
+ "complexity_score": f"{len(set(directions)) * len(set(intensities)) / len(frames):.3f}"
262
+ }
263
+ }
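As a quick sanity check on the arithmetic above, here is a minimal sketch of the percentage and segmentation math on a synthetic 8-frame input; the frame dictionaries mirror the `metrics` structure this module consumes and the numbers are illustrative only.

```python
from collections import Counter

# Hypothetical 8-frame input mirroring the expected {"metrics": {...}} structure.
frames = [{"metrics": {"direction": d, "intensity": "low"}}
          for d in ["up", "up", "up", "left", "left", "up", "up", "stationary"]]

directions = [f["metrics"]["direction"] for f in frames]
direction_stats = Counter(directions)
direction_percentages = {k: (v / len(frames)) * 100 for k, v in direction_stats.items()}
print(direction_percentages)   # {'up': 62.5, 'left': 25.0, 'stationary': 12.5}

# Temporal segmentation: with 8 frames, segment_size is max(1, 8 // 8) == 1,
# so the clip is split into 8 single-frame segments.
segment_size = max(1, len(frames) // 8)
segments = [frames[i:i + segment_size] for i in range(0, len(frames), segment_size)]
print(len(segments))           # 8
```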
264
+
265
+
266
+ def generate_movement_filter_analysis(
267
+ frames: List[Dict],
268
+ video_info: Dict,
269
+ model_info: Dict,
270
+ filter_direction: str,
271
+ filter_intensity: str,
272
+ filter_min_fluidity: float,
273
+ filter_min_expansion: float
274
+ ) -> Dict[str, Any]:
275
+ """
276
+ Generate movement filter analysis with pattern detection.
277
+
278
+ Args:
279
+ frames: List of frame analysis data
280
+ video_info: Video metadata
281
+ model_info: Model information
282
+ filter_direction: Direction filter criteria
283
+ filter_intensity: Intensity filter criteria
284
+ filter_min_fluidity: Minimum fluidity threshold
285
+ filter_min_expansion: Minimum expansion threshold
286
+
287
+ Returns:
288
+ Dictionary containing filter analysis results and recommendations
289
+ """
290
+ # Apply filters
291
+ matching_frames = []
292
+ total_frames = len(frames)
293
+
294
+ for frame in frames:
295
+ metrics = frame.get("metrics", {})
296
+ direction = metrics.get("direction", "stationary")
297
+ intensity = metrics.get("intensity", "low")
298
+ fluidity = metrics.get("fluidity", 0)
299
+ expansion = metrics.get("expansion", 0)
300
+
301
+ # Check filters
302
+ direction_match = filter_direction == "any" or direction == filter_direction
303
+ intensity_match = filter_intensity == "any" or intensity == filter_intensity
304
+ fluidity_match = fluidity >= filter_min_fluidity
305
+ expansion_match = expansion >= filter_min_expansion
306
+
307
+ if direction_match and intensity_match and fluidity_match and expansion_match:
308
+ matching_frames.append(frame)
309
+
310
+ match_percentage = (len(matching_frames) / total_frames) * 100 if total_frames > 0 else 0
311
+
312
+ # Pattern detection in matching frames
313
+ if matching_frames:
314
+ match_velocities = [f.get("metrics", {}).get("velocity", 0) for f in matching_frames]
315
+ match_accelerations = [f.get("metrics", {}).get("acceleration", 0) for f in matching_frames]
316
+
317
+ # Find continuous sequences
318
+ frame_indices = [f.get("frame_index", 0) for f in matching_frames]
319
+ sequences = []
320
+ if frame_indices:
321
+ current_seq = [frame_indices[0]]
322
+ for i in range(1, len(frame_indices)):
323
+ if frame_indices[i] == frame_indices[i-1] + 1:
324
+ current_seq.append(frame_indices[i])
325
+ else:
326
+ if len(current_seq) > 1:
327
+ sequences.append(current_seq)
328
+ current_seq = [frame_indices[i]]
329
+ if len(current_seq) > 1:
330
+ sequences.append(current_seq)
331
+
332
+ longest_sequence = max(sequences, key=len) if sequences else []
333
+
334
+ pattern_analysis = {
335
+ "total_matching_frames": len(matching_frames),
336
+ "match_percentage": f"{match_percentage:.1f}%",
337
+ "continuous_sequences": len(sequences),
338
+ "longest_sequence": {
339
+ "length": len(longest_sequence),
340
+ "start_frame": longest_sequence[0] if longest_sequence else 0,
341
+ "end_frame": longest_sequence[-1] if longest_sequence else 0,
342
+ "duration": f"{len(longest_sequence) / video_info.get('fps', 25):.2f}s" if longest_sequence else "0s"
343
+ },
344
+ "velocity_in_matches": {
345
+ "mean": f"{np.mean(match_velocities):.4f}" if match_velocities else "0",
346
+ "peak": f"{np.max(match_velocities):.4f}" if match_velocities else "0"
347
+ }
348
+ }
349
+ else:
350
+ pattern_analysis = {
351
+ "total_matching_frames": 0,
352
+ "match_percentage": "0%",
353
+ "message": "No frames match the specified criteria"
354
+ }
355
+
356
+ return {
357
+ "analysis_type": "Movement Filter Analysis",
358
+ "model_used": model_info.get("name", "unknown"),
359
+ "filter_criteria": {
360
+ "direction": filter_direction,
361
+ "intensity": filter_intensity,
362
+ "min_fluidity": filter_min_fluidity,
363
+ "min_expansion": filter_min_expansion
364
+ },
365
+ "results": pattern_analysis,
366
+ "recommendations": generate_filter_recommendations(matching_frames, total_frames, filter_direction, filter_intensity)
367
+ }
368
+
369
+
370
+ def generate_filter_recommendations(matching_frames: List[Dict], total_frames: int, filter_direction: str, filter_intensity: str) -> str:
371
+ """
372
+ Generate recommendations based on filter results.
373
+
374
+ Args:
375
+ matching_frames: List of frames that match filter criteria
376
+ total_frames: Total number of frames analyzed
377
+ filter_direction: Applied direction filter
378
+ filter_intensity: Applied intensity filter
379
+
380
+ Returns:
381
+ String containing intelligent recommendations
382
+ """
383
+ match_ratio = len(matching_frames) / total_frames if total_frames > 0 else 0
384
+
385
+ if match_ratio > 0.7:
386
+ return f"Strong pattern detected: {match_ratio*100:.1f}% of movement matches your criteria. This suggests consistent {filter_direction} movement with {filter_intensity} intensity."
387
+ elif match_ratio > 0.3:
388
+ return f"Moderate pattern: {match_ratio*100:.1f}% match suggests intermittent {filter_direction} movement patterns. Consider analyzing temporal distribution."
389
+ elif match_ratio > 0.1:
390
+ return f"Weak pattern: Only {match_ratio*100:.1f}% match. The movement may be more varied than your filter criteria suggest."
391
+ else:
392
+ return f"No significant pattern found ({match_ratio*100:.1f}% match). Consider broadening filter criteria or analyzing different movement qualities."
393
+
394
+
395
+ def process_standard_for_agent(json_data: Dict[str, Any], output_format: str = "summary") -> Dict[str, Any]:
396
+ """
397
+ Convert standard analysis JSON to agent format.
398
+
399
+ Args:
400
+ json_data: Standard movement analysis data
401
+ output_format: Desired output format ("summary", "structured", "json")
402
+
403
+ Returns:
404
+ Dictionary containing converted analysis in agent format
405
+ """
406
+ if not json_data or "error" in json_data:
407
+ return json_data
408
+
409
+ try:
410
+ # Extract key metrics from standard analysis
411
+ if "movement_analysis" in json_data and "frames" in json_data["movement_analysis"]:
412
+ frames = json_data["movement_analysis"]["frames"]
413
+ if not frames:
414
+ return {"error": "No movement data found"}
415
+
416
+ # Compute dominant characteristics
417
+ directions = [f.get("metrics", {}).get("direction", "stationary") for f in frames]
418
+ intensities = [f.get("metrics", {}).get("intensity", "low") for f in frames]
419
+ speeds = [f.get("metrics", {}).get("speed", "slow") for f in frames]
420
+ fluidities = [f.get("metrics", {}).get("fluidity", 0.0) for f in frames]
421
+ expansions = [f.get("metrics", {}).get("expansion", 0.5) for f in frames]
422
+
423
+ # Find dominant values
424
+ dominant_direction = Counter(directions).most_common(1)[0][0]
425
+ dominant_intensity = Counter(intensities).most_common(1)[0][0]
426
+ dominant_speed = Counter(speeds).most_common(1)[0][0]
427
+ avg_fluidity = sum(fluidities) / len(fluidities) if fluidities else 0.0
428
+ avg_expansion = sum(expansions) / len(expansions) if expansions else 0.5
429
+
430
+ if output_format == "summary":
431
+ return {
432
+ "summary": f"Movement Analysis: Predominantly {dominant_direction} direction with {dominant_intensity} intensity. "
433
+ f"Speed: {dominant_speed}. Fluidity: {avg_fluidity:.2f}, Expansion: {avg_expansion:.2f}"
434
+ }
435
+ elif output_format == "structured":
436
+ return {
437
+ "success": True,
438
+ "direction": dominant_direction,
439
+ "intensity": dominant_intensity,
440
+ "speed": dominant_speed,
441
+ "fluidity": avg_fluidity,
442
+ "expansion": avg_expansion,
443
+ "segments": len(frames)
444
+ }
445
+ else: # json
446
+ return json_data
447
+
448
+ return {"error": "Invalid analysis format"}
449
+ except Exception as e:
450
+ return {"error": f"Conversion failed: {str(e)}"}
src/backend/gradio_labanmovementanalysis/labanmovementanalysis.py CHANGED
@@ -283,7 +283,25 @@ class LabanMovementAnalysis(Component):
283
  """
284
  return self.process_video(video_path, **kwargs)
285
 
 
 
 
 
 
 
 
 
286
 
 
 
 
 
 
 
 
 
 
 
287
 
288
  # SkateFormer methods moved to Version 2 development
289
  # get_skateformer_compatibility() and get_skateformer_status_report()
 
283
  """
284
  return self.process_video(video_path, **kwargs)
285
 
286
+ def process(self, video_input: Union[str, os.PathLike], model: str = DEFAULT_MODEL, include_keypoints: bool = False) -> Dict[str, Any]:
287
+ """
288
+ Processes a video and returns only the JSON analysis result.
289
+
290
+ Args:
291
+ video_input: Path to input video, video URL, or file object
292
+ model: Pose estimation model to use
293
+ include_keypoints: Whether to include keypoints in JSON
294
 
295
+ Returns:
296
+ dict: Movement analysis results in JSON format
297
+ """
298
+ json_output, _ = self.process_video(
299
+ video_input,
300
+ model=model,
301
+ enable_visualization=False,
302
+ include_keypoints=include_keypoints
303
+ )
304
+ return json_output
305
 
306
  # SkateFormer methods moved to Version 2 development
307
  # get_skateformer_compatibility() and get_skateformer_status_report()
src/backend/gradio_labanmovementanalysis/labanmovementanalysis.pyi CHANGED
@@ -16,18 +16,13 @@ from .json_generator import generate_json, format_for_display
16
  from .visualizer import PoseVisualizer
17
  from .video_downloader import SmartVideoInput
18
 
19
- # Optional advanced features
20
- try:
21
- from .skateformer_integration import SkateFormerAnalyzer
22
- HAS_SKATEFORMER = True
23
- except ImportError:
24
- HAS_SKATEFORMER = False
25
 
26
- try:
27
- from .webrtc_handler import WebRTCMovementAnalyzer, WebRTCGradioInterface
28
- HAS_WEBRTC = True
29
- except ImportError:
30
- HAS_WEBRTC = False
31
 
32
  from gradio.events import Dependency
33
 
@@ -44,7 +39,7 @@ class LabanMovementAnalysis(Component):
44
  default_model: str = DEFAULT_MODEL,
45
  enable_visualization: bool = True,
46
  include_keypoints: bool = False,
47
- enable_webrtc: bool = False,
48
  label: Optional[str] = None,
49
  every: Optional[float] = None,
50
  show_label: Optional[bool] = None,
@@ -57,6 +52,7 @@ class LabanMovementAnalysis(Component):
57
  elem_classes: Optional[List[str]] = None,
58
  render: bool = True,
59
  **kwargs):
 
60
  """
61
  Initialize the Laban Movement Analysis component.
62
 
@@ -64,7 +60,7 @@ class LabanMovementAnalysis(Component):
64
  default_model: Default pose estimation model ("mediapipe", "movenet", "yolo")
65
  enable_visualization: Whether to generate visualization video by default
66
  include_keypoints: Whether to include raw keypoints in JSON output
67
- enable_webrtc: Whether to enable WebRTC real-time analysis
68
  label: Component label
69
  ... (other standard Gradio component args)
70
  """
@@ -86,8 +82,6 @@ class LabanMovementAnalysis(Component):
86
  self.default_model = default_model
87
  self.enable_visualization = enable_visualization
88
  self.include_keypoints = include_keypoints
89
- self.enable_webrtc = enable_webrtc and HAS_WEBRTC
90
-
91
  # Cache for pose estimators
92
  self._estimators = {}
93
 
@@ -95,16 +89,9 @@ class LabanMovementAnalysis(Component):
95
  self.video_input = SmartVideoInput()
96
 
97
  # SkateFormer features reserved for Version 2
98
-
99
- self.webrtc_analyzer = None
100
- if self.enable_webrtc:
101
- try:
102
- self.webrtc_analyzer = WebRTCMovementAnalyzer(model=default_model)
103
- except Exception as e:
104
- print(f"Warning: Failed to initialize WebRTC: {e}")
105
- self.enable_webrtc = False
106
 
107
  def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
 
108
  """
109
  Preprocess input from the frontend.
110
 
@@ -142,6 +129,7 @@ class LabanMovementAnalysis(Component):
142
  return options
143
 
144
  def postprocess(self, value: Any) -> Dict[str, Any]:
 
145
  """
146
  Postprocess analysis results for the frontend.
147
 
@@ -169,6 +157,7 @@ class LabanMovementAnalysis(Component):
169
  def process_video(self, video_input: Union[str, os.PathLike], model: str = DEFAULT_MODEL,
170
  enable_visualization: bool = True,
171
  include_keypoints: bool = False) -> Tuple[Dict[str, Any], Optional[str]]:
 
172
  """
173
  Main processing function that performs pose analysis on a video.
174
 
@@ -282,6 +271,7 @@ class LabanMovementAnalysis(Component):
282
  return json_output, visualization_path
283
 
284
  def __call__(self, video_path: str, **kwargs) -> Tuple[Dict[str, Any], Optional[str]]:
 
285
  """
286
  Make the component callable for easy use.
287
 
@@ -294,77 +284,39 @@ class LabanMovementAnalysis(Component):
294
  """
295
  return self.process_video(video_path, **kwargs)
296
 
297
- def start_webrtc_stream(self, model: str = None) -> bool:
298
  """
299
- Start WebRTC real-time analysis stream.
300
-
301
  Args:
302
- model: Pose model to use for real-time analysis
303
-
304
- Returns:
305
- True if stream started successfully
306
- """
307
- if not self.enable_webrtc or not self.webrtc_analyzer:
308
- print("WebRTC not enabled or available")
309
- return False
310
-
311
- try:
312
- if model:
313
- self.webrtc_analyzer.model = model
314
- self.webrtc_analyzer.pose_estimator = get_pose_estimator(model)
315
-
316
- self.webrtc_analyzer.start_stream()
317
- print(f"WebRTC stream started with {self.webrtc_analyzer.model} model")
318
- return True
319
- except Exception as e:
320
- print(f"Failed to start WebRTC stream: {e}")
321
- return False
322
-
323
- def stop_webrtc_stream(self) -> bool:
324
- """
325
- Stop WebRTC real-time analysis stream.
326
-
327
- Returns:
328
- True if stream stopped successfully
329
- """
330
- if not self.webrtc_analyzer:
331
- return False
332
-
333
- try:
334
- self.webrtc_analyzer.stop_stream()
335
- print("WebRTC stream stopped")
336
- return True
337
- except Exception as e:
338
- print(f"Failed to stop WebRTC stream: {e}")
339
- return False
340
-
341
- def get_webrtc_interface(self):
342
- """
343
- Get WebRTC Gradio interface for real-time streaming.
344
-
345
  Returns:
346
- WebRTCGradioInterface instance or None
347
  """
348
- if not self.enable_webrtc or not self.webrtc_analyzer:
349
- return None
350
-
351
- return WebRTCGradioInterface(self.webrtc_analyzer)
 
 
 
352
 
353
  # SkateFormer methods moved to Version 2 development
354
  # get_skateformer_compatibility() and get_skateformer_status_report()
355
  # will be available in the next major release
356
 
357
  def cleanup(self):
 
358
  """Clean up temporary files and resources."""
359
  # Clean up video input handler
360
  if hasattr(self, 'video_input'):
361
  self.video_input.cleanup()
362
-
363
- # Stop WebRTC if running
364
- if self.webrtc_analyzer and self.webrtc_analyzer.is_running:
365
- self.stop_webrtc_stream()
366
 
367
  def example_payload(self) -> Dict[str, Any]:
 
368
  """Example input payload for documentation."""
369
  return {
370
  "video": {"path": "/path/to/video.mp4"},
@@ -374,6 +326,7 @@ class LabanMovementAnalysis(Component):
374
  }
375
 
376
  def example_value(self) -> Dict[str, Any]:
 
377
  """Example output value for documentation."""
378
  return {
379
  "json_output": {
@@ -422,6 +375,7 @@ class LabanMovementAnalysis(Component):
422
  }
423
 
424
  def api_info(self) -> Dict[str, Any]:
 
425
  """API information for the component."""
426
  return {
427
  "type": "composite",
 
16
  from .visualizer import PoseVisualizer
17
  from .video_downloader import SmartVideoInput
18
 
19
+ # Advanced features reserved for Version 2
20
+ # SkateFormer AI integration will be available in future release
 
 
 
 
21
 
22
+
23
+
24
+ # SkateFormerCompatibility class removed for Version 1 stability
25
+ # Will be reimplemented in Version 2 with enhanced AI features
 
26
 
27
  from gradio.events import Dependency
28
 
 
39
  default_model: str = DEFAULT_MODEL,
40
  enable_visualization: bool = True,
41
  include_keypoints: bool = False,
42
+
43
  label: Optional[str] = None,
44
  every: Optional[float] = None,
45
  show_label: Optional[bool] = None,
 
52
  elem_classes: Optional[List[str]] = None,
53
  render: bool = True,
54
  **kwargs):
55
+ # print("[TRACE] LabanMovementAnalysis.__init__ called")
56
  """
57
  Initialize the Laban Movement Analysis component.
58
 
 
60
  default_model: Default pose estimation model ("mediapipe", "movenet", "yolo")
61
  enable_visualization: Whether to generate visualization video by default
62
  include_keypoints: Whether to include raw keypoints in JSON output
63
+
64
  label: Component label
65
  ... (other standard Gradio component args)
66
  """
 
82
  self.default_model = default_model
83
  self.enable_visualization = enable_visualization
84
  self.include_keypoints = include_keypoints
 
 
85
  # Cache for pose estimators
86
  self._estimators = {}
87
 
 
89
  self.video_input = SmartVideoInput()
90
 
91
  # SkateFormer features reserved for Version 2
 
 
 
 
 
 
 
 
92
 
93
  def preprocess(self, payload: Dict[str, Any]) -> Dict[str, Any]:
94
+ # print("[TRACE] LabanMovementAnalysis.preprocess called")
95
  """
96
  Preprocess input from the frontend.
97
 
 
129
  return options
130
 
131
  def postprocess(self, value: Any) -> Dict[str, Any]:
132
+ # print("[TRACE] LabanMovementAnalysis.postprocess called")
133
  """
134
  Postprocess analysis results for the frontend.
135
 
 
157
  def process_video(self, video_input: Union[str, os.PathLike], model: str = DEFAULT_MODEL,
158
  enable_visualization: bool = True,
159
  include_keypoints: bool = False) -> Tuple[Dict[str, Any], Optional[str]]:
160
+ # print(f"[TRACE] LabanMovementAnalysis.process_video called with model={model}, enable_visualization={enable_visualization}, include_keypoints={include_keypoints}")
161
  """
162
  Main processing function that performs pose analysis on a video.
163
 
 
271
  return json_output, visualization_path
272
 
273
  def __call__(self, video_path: str, **kwargs) -> Tuple[Dict[str, Any], Optional[str]]:
274
+ # print(f"[TRACE] LabanMovementAnalysis.__call__ called with video_path={video_path}")
275
  """
276
  Make the component callable for easy use.
277
 
 
284
  """
285
  return self.process_video(video_path, **kwargs)
286
 
287
+ def process(self, video_input: Union[str, os.PathLike], model: str = DEFAULT_MODEL, include_keypoints: bool = False) -> Dict[str, Any]:
288
  """
289
+ Processes a video and returns only the JSON analysis result.
290
+
291
  Args:
292
+ video_input: Path to input video, video URL, or file object
293
+ model: Pose estimation model to use
294
+ include_keypoints: Whether to include keypoints in JSON
295
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
  Returns:
297
+ dict: Movement analysis results in JSON format
298
  """
299
+ json_output, _ = self.process_video(
300
+ video_input,
301
+ model=model,
302
+ enable_visualization=False,
303
+ include_keypoints=include_keypoints
304
+ )
305
+ return json_output
306
 
307
  # SkateFormer methods moved to Version 2 development
308
  # get_skateformer_compatibility() and get_skateformer_status_report()
309
  # will be available in the next major release
310
 
311
  def cleanup(self):
312
+ # print("[TRACE] LabanMovementAnalysis.cleanup called")
313
  """Clean up temporary files and resources."""
314
  # Clean up video input handler
315
  if hasattr(self, 'video_input'):
316
  self.video_input.cleanup()
 
 
 
 
317
 
318
  def example_payload(self) -> Dict[str, Any]:
319
+ # print("[TRACE] LabanMovementAnalysis.example_payload called")
320
  """Example input payload for documentation."""
321
  return {
322
  "video": {"path": "/path/to/video.mp4"},
 
326
  }
327
 
328
  def example_value(self) -> Dict[str, Any]:
329
+ # print("[TRACE] LabanMovementAnalysis.example_value called")
330
  """Example output value for documentation."""
331
  return {
332
  "json_output": {
 
375
  }
376
 
377
  def api_info(self) -> Dict[str, Any]:
378
+ # print("[TRACE] LabanMovementAnalysis.api_info called")
379
  """API information for the component."""
380
  return {
381
  "type": "composite",
src/backend/gradio_labanmovementanalysis/notation_engine.py CHANGED
@@ -60,74 +60,169 @@ class MovementMetrics:
60
  total_displacement: float = 0.0
61
 
62
 
 
 
 
 
 
 
 
 
 
 
 
63
  class MovementAnalyzer:
64
  """Analyzes pose sequences to extract LMA-style movement metrics."""
65
 
66
  def __init__(self, fps: float = 30.0,
67
- velocity_threshold_slow: float = 0.01, # Normalized units / sec
68
- velocity_threshold_fast: float = 0.1, # Normalized units / sec
69
- intensity_accel_threshold: float = 0.05): # Normalized units / sec^2
 
 
70
  """
71
- Initialize movement analyzer.
72
 
73
  Args:
74
  fps: Frames per second of the video
75
- velocity_threshold_slow: Threshold for slow movement (normalized units per second)
76
- velocity_threshold_fast: Threshold for fast movement (normalized units per second)
77
- intensity_accel_threshold: Acceleration threshold for intensity (normalized units per second^2)
 
 
78
  """
79
  self.fps = fps
80
  self.frame_duration = 1.0 / fps if fps > 0 else 0.0
81
  self.velocity_threshold_slow = velocity_threshold_slow
82
  self.velocity_threshold_fast = velocity_threshold_fast
83
  self.intensity_accel_threshold = intensity_accel_threshold
 
 
 
 
 
 
 
 
84
 
85
  def analyze_movement(self, pose_sequence: List[List[PoseResult]]) -> List[MovementMetrics]:
86
  """
87
  Analyze a sequence of poses to compute movement metrics.
88
 
89
  Args:
90
- pose_sequence: List of pose results per frame. Each inner list contains PoseResult
91
- objects for detected persons in that frame.
92
 
93
  Returns:
94
- List of movement metrics per frame (analyzing the first detected person only).
95
  """
96
  if not pose_sequence:
97
  return []
98
 
99
  metrics = []
100
- prev_center_for_person = None # Tracks the center of the first person from the previous frame
101
- prev_velocity_for_person = None # Tracks the velocity of the first person from the previous frame
 
102
 
103
  for frame_idx, frame_poses in enumerate(pose_sequence):
104
- current_timestamp = frame_idx * self.frame_duration if self.frame_duration else None
105
-
106
- if not frame_poses or not frame_poses[0].keypoints:
107
- # No pose detected, or first pose has no keypoints
108
  metrics.append(MovementMetrics(
109
  frame_index=frame_idx,
110
- timestamp=current_timestamp
111
  ))
112
- # Reset tracking for the next frame where a person might appear
113
- prev_center_for_person = None
114
- prev_velocity_for_person = None
115
  continue
116
 
117
- # Focus on the first detected person
118
- pose = frame_poses[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
 
120
- center = self._compute_body_center(pose.keypoints)
 
 
 
 
121
 
 
122
  frame_metrics = MovementMetrics(
123
  frame_index=frame_idx,
124
- timestamp=current_timestamp
125
  )
126
 
127
- if prev_center_for_person is not None and frame_idx > 0 and self.fps > 0:
 
128
  displacement = (
129
- center[0] - prev_center_for_person[0],
130
- center[1] - prev_center_for_person[1]
131
  )
132
  frame_metrics.center_displacement = displacement
133
  frame_metrics.total_displacement = np.sqrt(
@@ -138,9 +233,10 @@ class MovementAnalyzer:
138
  frame_metrics.direction = self._compute_direction(displacement)
139
  frame_metrics.speed = self._categorize_speed(frame_metrics.velocity)
140
 
141
- if prev_velocity_for_person is not None:
142
- delta_velocity = frame_metrics.velocity - prev_velocity_for_person
143
- frame_metrics.acceleration = delta_velocity * self.fps
 
144
 
145
  frame_metrics.intensity = self._compute_intensity(
146
  frame_metrics.acceleration,
@@ -153,13 +249,47 @@ class MovementAnalyzer:
153
  frame_metrics.expansion = self._compute_expansion(pose.keypoints)
154
  metrics.append(frame_metrics)
155
 
156
- # Update for the next iteration, specifically for the first person
157
- prev_center_for_person = center
158
- prev_velocity_for_person = frame_metrics.velocity
 
 
 
 
 
 
159
 
160
  metrics = self._smooth_metrics(metrics)
161
  return metrics
162
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  def _compute_body_center(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
164
  """Compute the center of mass of the body."""
165
  major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
@@ -168,20 +298,20 @@ class MovementAnalyzer:
168
  y_coords = []
169
 
170
  for kp in keypoints:
171
- if kp.name in major_joints and kp.confidence > 0.5:
172
  x_coords.append(kp.x)
173
  y_coords.append(kp.y)
174
 
175
- if not x_coords or not y_coords:
176
  x_coords = [kp.x for kp in keypoints if kp.confidence > 0.3]
177
  y_coords = [kp.y for kp in keypoints if kp.confidence > 0.3]
178
 
179
- if x_coords and y_coords:
180
  return (np.mean(x_coords), np.mean(y_coords))
181
- return (0.5, 0.5)
182
 
183
  def _get_limb_positions(self, keypoints: List[Keypoint]) -> Dict[str, Tuple[float, float]]:
184
- """Get positions of major limbs."""
185
  positions = {}
186
  for kp in keypoints:
187
  if kp.confidence > 0.3 and kp.name:
@@ -191,7 +321,7 @@ class MovementAnalyzer:
191
  def _compute_direction(self, displacement: Tuple[float, float]) -> Direction:
192
  """Compute movement direction from displacement vector."""
193
  dx, dy = displacement
194
- threshold = 0.005
195
 
196
  if abs(dx) < threshold and abs(dy) < threshold:
197
  return Direction.STATIONARY
@@ -199,22 +329,23 @@ class MovementAnalyzer:
199
  if abs(dx) > abs(dy):
200
  return Direction.RIGHT if dx > 0 else Direction.LEFT
201
  else:
202
- return Direction.DOWN if dy > 0 else Direction.UP
203
 
204
  def _categorize_speed(self, velocity: float) -> Speed:
205
  """Categorize velocity into speed levels (velocity is in normalized units/sec)."""
206
  if velocity < self.velocity_threshold_slow:
207
  return Speed.SLOW
208
  elif velocity < self.velocity_threshold_fast:
209
- return Speed.MODERATE
210
  else:
211
  return Speed.FAST
212
 
213
  def _compute_intensity(self, acceleration: float, velocity: float) -> Intensity:
214
  """Compute movement intensity (accel in norm_units/sec^2, vel in norm_units/sec)."""
215
- if abs(acceleration) > self.intensity_accel_threshold * 2 or velocity > self.velocity_threshold_fast:
 
216
  return Intensity.HIGH
217
- elif abs(acceleration) > self.intensity_accel_threshold or velocity > self.velocity_threshold_slow:
218
  return Intensity.MEDIUM
219
  else:
220
  return Intensity.LOW
@@ -224,7 +355,8 @@ class MovementAnalyzer:
224
  Compute fluidity score (0-1) based on acceleration (norm_units/sec^2).
225
  Lower acceleration = higher fluidity.
226
  """
227
- max_expected_accel = 0.2
 
228
  norm_accel = min(abs(acceleration) / max_expected_accel, 1.0) if max_expected_accel > 0 else 0.0
229
  return 1.0 - norm_accel
230
 
@@ -238,26 +370,54 @@ class MovementAnalyzer:
238
  ("left_ankle", "right_ankle"),
239
  ("left_wrist", "left_ankle"),
240
  ("right_wrist", "right_ankle"),
 
241
  ]
242
 
243
  kp_dict = {kp.name: kp for kp in keypoints if kp.confidence > 0.3 and kp.name}
244
- if not kp_dict: return 0.5
245
 
246
  distances = []
247
  for name1, name2 in limb_pairs:
248
  if name1 in kp_dict and name2 in kp_dict:
249
  kp1 = kp_dict[name1]
250
  kp2 = kp_dict[name2]
 
251
  if not (np.isnan(kp1.x) or np.isnan(kp1.y) or np.isnan(kp2.x) or np.isnan(kp2.y)):
252
  dist = np.sqrt((kp1.x - kp2.x)**2 + (kp1.y - kp2.y)**2)
253
  distances.append(dist)
254
 
255
  if distances:
256
  avg_dist = np.mean(distances)
257
- max_possible_dist_heuristic = 1.0
 
 
258
  return min(avg_dist / max_possible_dist_heuristic, 1.0) if max_possible_dist_heuristic > 0 else 0.0
259
 
260
- return 0.5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
 
262
  def _smooth_metrics(self, metrics_list: List[MovementMetrics]) -> List[MovementMetrics]:
263
  """Apply smoothing to reduce noise in metrics using a simple moving average."""
@@ -267,8 +427,9 @@ class MovementAnalyzer:
267
  if num_metrics <= window_size:
268
  return metrics_list
269
 
270
- smoothed_metrics_list = metrics_list[:]
271
 
 
272
  fields_to_smooth = ["velocity", "acceleration", "fluidity", "expansion"]
273
 
274
  for i in range(num_metrics):
 
60
  total_displacement: float = 0.0
61
 
62
 
63
+ class SimpleKalmanFilter:
64
+ """Lightweight Kalman filter for position/velocity tracking."""
65
+
66
+ def __init__(self, process_noise: float = 0.01, measurement_noise: float = 0.1):
67
+ self.process_noise = process_noise
68
+ self.measurement_noise = measurement_noise
69
+ self.is_initialized = False
70
+
71
+ # State: [x, y, vx, vy]
72
+ self.state = np.zeros(4)
73
+ self.covariance = np.eye(4) * 0.1
74
+
75
+ # Transition matrix (constant velocity model)
76
+ self.F = np.array([[1, 0, 1, 0],
77
+ [0, 1, 0, 1],
78
+ [0, 0, 1, 0],
79
+ [0, 0, 0, 1]])
80
+
81
+ # Measurement matrix (observe position only)
82
+ self.H = np.array([[1, 0, 0, 0],
83
+ [0, 1, 0, 0]])
84
+
85
+ # Process noise matrix
86
+ self.Q = np.eye(4) * process_noise
87
+
88
+ # Measurement noise matrix
89
+ self.R = np.eye(2) * measurement_noise
90
+
91
+ def predict(self, dt: float = 1.0):
92
+ """Predict next state."""
93
+ # Update transition matrix with time step
94
+ self.F[0, 2] = dt
95
+ self.F[1, 3] = dt
96
+
97
+ # Predict state
98
+ self.state = self.F @ self.state
99
+ self.covariance = self.F @ self.covariance @ self.F.T + self.Q
100
+
101
+ def update(self, measurement: Tuple[float, float]):
102
+ """Update with measurement."""
103
+ z = np.array([measurement[0], measurement[1]])
104
+
105
+ if not self.is_initialized:
106
+ self.state[:2] = z
107
+ self.is_initialized = True
108
+ return
109
+
110
+ # Calculate Kalman gain
111
+ S = self.H @ self.covariance @ self.H.T + self.R
112
+ K = self.covariance @ self.H.T @ np.linalg.inv(S)
113
+
114
+ # Update state
115
+ y = z - self.H @ self.state
116
+ self.state = self.state + K @ y
117
+ self.covariance = (np.eye(4) - K @ self.H) @ self.covariance
118
+
119
+ def get_position(self) -> Tuple[float, float]:
120
+ """Get filtered position."""
121
+ return (self.state[0], self.state[1])
122
+
123
+ def get_velocity(self) -> Tuple[float, float]:
124
+ """Get filtered velocity."""
125
+ return (self.state[2], self.state[3])
126
+
127
+
128
  class MovementAnalyzer:
129
  """Analyzes pose sequences to extract LMA-style movement metrics."""
130
 
131
  def __init__(self, fps: float = 30.0,
132
+ velocity_threshold_slow: float = 0.01,
133
+ velocity_threshold_fast: float = 0.1,
134
+ intensity_accel_threshold: float = 0.05,
135
+ use_kalman_filter: bool = True,
136
+ use_adaptive_thresholds: bool = True):
137
  """
138
+ Initialize movement analyzer with advanced features.
139
 
140
  Args:
141
  fps: Frames per second of the video
142
+ velocity_threshold_slow: Initial threshold for slow movement
143
+ velocity_threshold_fast: Initial threshold for fast movement
144
+ intensity_accel_threshold: Initial acceleration threshold for intensity
145
+ use_kalman_filter: Whether to use Kalman filtering for tracking
146
+ use_adaptive_thresholds: Whether to adapt thresholds based on video content
147
  """
148
  self.fps = fps
149
  self.frame_duration = 1.0 / fps if fps > 0 else 0.0
150
  self.velocity_threshold_slow = velocity_threshold_slow
151
  self.velocity_threshold_fast = velocity_threshold_fast
152
  self.intensity_accel_threshold = intensity_accel_threshold
153
+ self.use_kalman_filter = use_kalman_filter
154
+ self.use_adaptive_thresholds = use_adaptive_thresholds
155
+
156
+ # Kalman filter for tracking
157
+ self.kalman_filter = SimpleKalmanFilter() if use_kalman_filter else None
158
+
159
+ # Adaptive threshold parameters
160
+ self.adaptive_thresholds_computed = False
161
 
162
  def analyze_movement(self, pose_sequence: List[List[PoseResult]]) -> List[MovementMetrics]:
163
  """
164
  Analyze a sequence of poses to compute movement metrics.
165
 
166
  Args:
167
+ pose_sequence: List of pose results per frame. Each inner list can contain
168
+ multiple PoseResult if multiple people are detected.
169
 
170
  Returns:
171
+ List of movement metrics per frame (currently for the first detected person).
172
  """
173
  if not pose_sequence:
174
  return []
175
 
176
  metrics = []
177
+ # For multi-person, these would need to be dictionaries mapping person_id to values
178
+ prev_centers = None # Store as {person_id: center_coords} for multi-person
179
+ prev_velocity = None # Store as {person_id: velocity_value} for multi-person
180
 
181
  for frame_idx, frame_poses in enumerate(pose_sequence):
182
+ if not frame_poses:
183
+ # No pose detected in this frame
 
 
184
  metrics.append(MovementMetrics(
185
  frame_index=frame_idx,
186
+ timestamp=frame_idx * self.frame_duration if self.frame_duration else None
187
  ))
 
 
 
188
  continue
189
 
190
+ # --- CURRENT: Analyze first person only ---
191
+ # TODO: Extend to multi-person analysis. This would involve iterating
192
+ # through frame_poses and tracking metrics for each person_id.
193
+ pose = frame_poses[0]
194
+ # --- END CURRENT ---
195
+
196
+ if not pose.keypoints: # Ensure pose object has keypoints
197
+ metrics.append(MovementMetrics(
198
+ frame_index=frame_idx,
199
+ timestamp=frame_idx * self.frame_duration if self.frame_duration else None
200
+ ))
201
+ # Reset for next frame if this person was being tracked
202
+ prev_centers = None # Or prev_centers.pop(person_id, None)
203
+ prev_velocity = None # Or prev_velocity.pop(person_id, None)
204
+ continue
205
+
206
+ # Compute confidence-weighted body center
207
+ center = self._compute_body_center_weighted(pose.keypoints)
208
 
209
+ # Apply Kalman filtering if enabled
210
+ if self.kalman_filter:
211
+ self.kalman_filter.predict(self.frame_duration)
212
+ self.kalman_filter.update(center)
213
+ center = self.kalman_filter.get_position()
214
 
215
+ # Initialize metrics for this frame
216
  frame_metrics = MovementMetrics(
217
  frame_index=frame_idx,
218
+ timestamp=frame_idx * self.frame_duration if self.frame_duration else None
219
  )
220
 
221
+ # Displacement, velocity, etc. can only be computed if there's a previous frame's center
222
+ if prev_centers is not None and frame_idx > 0 and self.fps > 0:
223
  displacement = (
224
+ center[0] - prev_centers[0],
225
+ center[1] - prev_centers[1]
226
  )
227
  frame_metrics.center_displacement = displacement
228
  frame_metrics.total_displacement = np.sqrt(
 
233
  frame_metrics.direction = self._compute_direction(displacement)
234
  frame_metrics.speed = self._categorize_speed(frame_metrics.velocity)
235
 
236
+ if prev_velocity is not None:
237
+ # Acceleration (change in velocity per second)
238
+ delta_velocity = frame_metrics.velocity - prev_velocity
239
+ frame_metrics.acceleration = delta_velocity * self.fps # (units/s)/s = units/s^2
240
 
241
  frame_metrics.intensity = self._compute_intensity(
242
  frame_metrics.acceleration,
 
249
  frame_metrics.expansion = self._compute_expansion(pose.keypoints)
250
  metrics.append(frame_metrics)
251
 
252
+ # Update for next iteration (for the currently tracked person)
253
+ prev_centers = center
254
+ prev_velocity = frame_metrics.velocity
255
+
256
+ # Apply adaptive thresholds if enabled
257
+ if self.use_adaptive_thresholds and not self.adaptive_thresholds_computed:
258
+ self._compute_adaptive_thresholds(metrics)
259
+ # Recompute speed and intensity with new thresholds
260
+ metrics = self._recompute_with_adaptive_thresholds(metrics)
261
 
262
  metrics = self._smooth_metrics(metrics)
263
  return metrics
264
 
265
+ def _compute_body_center_weighted(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
266
+ """Compute confidence-weighted center of mass of the body."""
267
+ major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
268
+
269
+ weighted_x = 0.0
270
+ weighted_y = 0.0
271
+ total_weight = 0.0
272
+
273
+ for kp in keypoints:
274
+ if kp.name in major_joints and kp.confidence > 0.5:
275
+ weight = kp.confidence
276
+ weighted_x += kp.x * weight
277
+ weighted_y += kp.y * weight
278
+ total_weight += weight
279
+
280
+ # Fallback to all keypoints if no major joints found
281
+ if total_weight == 0:
282
+ for kp in keypoints:
283
+ if kp.confidence > 0.3:
284
+ weight = kp.confidence
285
+ weighted_x += kp.x * weight
286
+ weighted_y += kp.y * weight
287
+ total_weight += weight
288
+
289
+ if total_weight > 0:
290
+ return (weighted_x / total_weight, weighted_y / total_weight)
291
+ return (0.5, 0.5)
292
+
293
  def _compute_body_center(self, keypoints: List[Keypoint]) -> Tuple[float, float]:
294
  """Compute the center of mass of the body."""
295
  major_joints = ["left_hip", "right_hip", "left_shoulder", "right_shoulder"]
 
298
  y_coords = []
299
 
300
  for kp in keypoints:
301
+ if kp.confidence > 0.5 and kp.name in major_joints: # Ensure kp.name is not None
302
  x_coords.append(kp.x)
303
  y_coords.append(kp.y)
304
 
305
+ if not x_coords or not y_coords: # If no major joints, try all keypoints
306
  x_coords = [kp.x for kp in keypoints if kp.confidence > 0.3]
307
  y_coords = [kp.y for kp in keypoints if kp.confidence > 0.3]
308
 
309
+ if x_coords and y_coords: # Check if lists are non-empty
310
  return (np.mean(x_coords), np.mean(y_coords))
311
+ return (0.5, 0.5) # Default center if no reliable keypoints
312
 
313
  def _get_limb_positions(self, keypoints: List[Keypoint]) -> Dict[str, Tuple[float, float]]:
314
+ """Get positions of major limbs. (Currently not heavily used beyond potential debugging)"""
315
  positions = {}
316
  for kp in keypoints:
317
  if kp.confidence > 0.3 and kp.name:
 
321
  def _compute_direction(self, displacement: Tuple[float, float]) -> Direction:
322
  """Compute movement direction from displacement vector."""
323
  dx, dy = displacement
324
+ threshold = 0.005 # Normalized units per frame
325
 
326
  if abs(dx) < threshold and abs(dy) < threshold:
327
  return Direction.STATIONARY
 
329
  if abs(dx) > abs(dy):
330
  return Direction.RIGHT if dx > 0 else Direction.LEFT
331
  else:
332
+ return Direction.DOWN if dy > 0 else Direction.UP # dy positive is typically down in image coords
333
 
334
  def _categorize_speed(self, velocity: float) -> Speed:
335
  """Categorize velocity into speed levels (velocity is in normalized units/sec)."""
336
  if velocity < self.velocity_threshold_slow:
337
  return Speed.SLOW
338
  elif velocity < self.velocity_threshold_fast:
339
+ return Speed.MODERATE # Corrected from Speed.FAST
340
  else:
341
  return Speed.FAST
342
 
343
  def _compute_intensity(self, acceleration: float, velocity: float) -> Intensity:
344
  """Compute movement intensity (accel in norm_units/sec^2, vel in norm_units/sec)."""
345
+ # Thresholds are relative to normalized space and per-second metrics
346
+ if abs(acceleration) > self.intensity_accel_threshold * 2 or velocity > self.velocity_threshold_fast:
347
  return Intensity.HIGH
348
+ elif abs(acceleration) > self.intensity_accel_threshold or velocity > self.velocity_threshold_slow:
349
  return Intensity.MEDIUM
350
  else:
351
  return Intensity.LOW
 
355
  Compute fluidity score (0-1) based on acceleration (norm_units/sec^2).
356
  Lower acceleration = higher fluidity.
357
  """
358
+ max_expected_accel = 0.2 # This is an assumption for normalization, might need tuning.
359
+ # Represents a fairly high acceleration in normalized units/sec^2.
360
  norm_accel = min(abs(acceleration) / max_expected_accel, 1.0) if max_expected_accel > 0 else 0.0
361
  return 1.0 - norm_accel
362
 
 
370
  ("left_ankle", "right_ankle"),
371
  ("left_wrist", "left_ankle"),
372
  ("right_wrist", "right_ankle"),
373
+ # Could add torso diagonals like ("left_shoulder", "right_hip")
374
  ]
375
 
376
  kp_dict = {kp.name: kp for kp in keypoints if kp.confidence > 0.3 and kp.name}
377
+ if not kp_dict: return 0.5 # No reliable keypoints
378
 
379
  distances = []
380
  for name1, name2 in limb_pairs:
381
  if name1 in kp_dict and name2 in kp_dict:
382
  kp1 = kp_dict[name1]
383
  kp2 = kp_dict[name2]
384
+ # Ensure coordinates are not NaN before calculation
385
  if not (np.isnan(kp1.x) or np.isnan(kp1.y) or np.isnan(kp2.x) or np.isnan(kp2.y)):
386
  dist = np.sqrt((kp1.x - kp2.x)**2 + (kp1.y - kp2.y)**2)
387
  distances.append(dist)
388
 
389
  if distances:
390
  avg_dist = np.mean(distances)
391
+ # Max expected distance (e.g., diagonal of normalized 1x1 space is sqrt(2) approx 1.414)
392
+ # This assumes keypoints are normalized.
393
+ max_possible_dist_heuristic = 1.0 # A more conservative heuristic than 1.4, as limbs rarely span the full diagonal.
394
  return min(avg_dist / max_possible_dist_heuristic, 1.0) if max_possible_dist_heuristic > 0 else 0.0
395
 
396
+ return 0.5 # Default neutral expansion if no valid pairs
397
+
398
+ def _compute_adaptive_thresholds(self, metrics: List[MovementMetrics]):
399
+ """Compute adaptive thresholds based on movement characteristics in the video."""
400
+ velocities = [m.velocity for m in metrics if m.velocity > 0]
401
+ accelerations = [abs(m.acceleration) for m in metrics if m.acceleration != 0]
402
+
403
+ if velocities:
404
+ velocity_percentiles = np.percentile(velocities, [25, 75])
405
+ self.velocity_threshold_slow = max(velocity_percentiles[0], 0.005)
406
+ self.velocity_threshold_fast = velocity_percentiles[1]
407
+
408
+ if accelerations:
409
+ accel_75th = np.percentile(accelerations, 75)
410
+ self.intensity_accel_threshold = max(accel_75th * 0.5, 0.01)
411
+
412
+ self.adaptive_thresholds_computed = True
413
+
414
+ def _recompute_with_adaptive_thresholds(self, metrics: List[MovementMetrics]) -> List[MovementMetrics]:
415
+ """Recompute speed and intensity classifications with adaptive thresholds."""
416
+ for metric in metrics:
417
+ if metric.velocity > 0:
418
+ metric.speed = self._categorize_speed(metric.velocity)
419
+ metric.intensity = self._compute_intensity(metric.acceleration, metric.velocity)
420
+ return metrics
421
 
422
  def _smooth_metrics(self, metrics_list: List[MovementMetrics]) -> List[MovementMetrics]:
423
  """Apply smoothing to reduce noise in metrics using a simple moving average."""
 
427
  if num_metrics <= window_size:
428
  return metrics_list
429
 
430
+ smoothed_metrics_list = metrics_list[:] # Work on a copy
431
 
432
+ # Fields to smooth
433
  fields_to_smooth = ["velocity", "acceleration", "fluidity", "expansion"]
434
 
435
  for i in range(num_metrics):
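A self-contained sketch of the SimpleKalmanFilter introduced above smoothing a noisy centre trajectory, together with the analyzer flags that enable it; the import path and the measurement values are assumptions for illustration.

```python
# Assumed import path; both classes are defined in this module.
from gradio_labanmovementanalysis.notation_engine import SimpleKalmanFilter, MovementAnalyzer

kf = SimpleKalmanFilter(process_noise=0.01, measurement_noise=0.1)

# Made-up noisy body-centre measurements in normalized [0, 1] coordinates.
noisy_centres = [(0.50, 0.50), (0.52, 0.49), (0.55, 0.51), (0.61, 0.50), (0.66, 0.52)]
for x, y in noisy_centres:
    kf.predict(dt=1.0 / 30.0)   # 30 fps frame step
    kf.update((x, y))

print("filtered position:", kf.get_position())
print("filtered velocity:", kf.get_velocity())

# The analyzer opts in to the filter and the percentile-based adaptive thresholds.
analyzer = MovementAnalyzer(fps=30.0, use_kalman_filter=True, use_adaptive_thresholds=True)
```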
src/cookies.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Netscape HTTP Cookie File
2
+ # This file is generated by yt-dlp. Do not edit.
3
+
4
+ .youtube.com TRUE / FALSE 0 PREF hl=en&tz=UTC
5
+ .youtube.com TRUE / TRUE 0 SOCS CAI
6
+ .youtube.com TRUE / TRUE 1749571718 GPS 1
7
+ .youtube.com TRUE / TRUE 0 YSC Lynn9Gphl18
8
+ .youtube.com TRUE / TRUE 1765122596 VISITOR_INFO1_LIVE pOBV-yb5TwA
9
+ .youtube.com TRUE / TRUE 1765122596 VISITOR_PRIVACY_METADATA CgJTSxIhEh0SGwsMDg8QERITFBUWFxgZGhscHR4fICEiIyQlJiA5
10
+ .youtube.com TRUE / TRUE 1765121919 __Secure-ROLLOUT_TOKEN CIvW7bfurM-MgAEQm8aF7JfnjQMYuv2n7JfnjQM%3D
11
+ .youtube.com TRUE / TRUE 1812642596 __Secure-YT_TVFAS t=485991&s=2
12
+ .youtube.com TRUE / TRUE 1765122596 DEVICE_INFO ChxOelV4TkRNME5UVTROVGsyTWpnME1UazBNZz09EKSoocIGGP+iocIG
src/demo/app.py CHANGED
@@ -3,11 +3,12 @@
3
  Laban Movement Analysis – modernised Gradio Space
4
  Author: Csaba (BladeSzaSza)
5
  """
6
-
7
  import gradio as gr
8
  import os
 
 
9
  from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
10
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
11
 
12
  # Import agent API if available
13
  # Initialize agent API if available
@@ -19,14 +20,11 @@ try:
19
  MovementDirection,
20
  MovementIntensity
21
  )
 
22
  HAS_AGENT_API = True
23
-
24
- try:
25
- agent_api = LabanAgentAPI()
26
- except Exception as e:
27
- print(f"Warning: Agent API not available: {e}")
28
- agent_api = None
29
- except ImportError:
30
  HAS_AGENT_API = False
31
  # Initialize components
32
  try:
@@ -59,21 +57,86 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
59
  error_result = {"error": str(e)}
60
  return error_result, None
61
 
62
- def process_video_standard(video, model, enable_viz, include_keypoints):
63
- """Standard video processing function."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  if video is None:
65
- return None, None
66
-
67
  try:
68
- json_output, video_output = analyzer.process_video(
69
  video,
70
  model=model,
71
- enable_visualization=enable_viz,
72
  include_keypoints=include_keypoints
73
  )
74
- return json_output, video_output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  except Exception as e:
76
- return {"error": str(e)}, None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
  # ── 4. Build UI ─────────────────────────────────────────────────
79
  def create_demo() -> gr.Blocks:
@@ -82,18 +145,18 @@ def create_demo() -> gr.Blocks:
82
  theme='gstaff/sketch',
83
  fill_width=True,
84
  ) as demo:
85
-
86
  # ── Hero banner ──
87
  gr.Markdown(
88
  """
89
- # 🎭 Laban Movement Analysis
90
 
91
  Pose estimation • AI action recognition • Movement Analysis
92
  """
93
  )
94
  with gr.Tabs():
95
  # Tab 1: Standard Analysis
96
- with gr.Tab("🎬 Standard Analysis"):
97
  gr.Markdown("""
98
  ### Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
99
  """)
@@ -130,8 +193,8 @@ def create_demo() -> gr.Blocks:
130
  )
131
 
132
  with gr.Accordion("Analysis Options", open=False):
133
- enable_viz = gr.Radio([("Yes", 1), ("No", 0)], value=1, label="Visualization")
134
- include_kp = gr.Radio([("Yes", 1), ("No", 0)], value=0, label="Raw Keypoints")
135
 
136
  gr.Examples(
137
  examples=[
@@ -156,7 +219,9 @@ def create_demo() -> gr.Blocks:
156
  def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
157
  """Process either file upload or URL input."""
158
  video_source = file_input if file_input else url_input
159
- return process_video_enhanced(video_source, model, enable_viz, include_keypoints)
 
 
160
 
161
  analyze_btn_enh.click(
162
  fn=process_enhanced_input,
@@ -165,18 +230,295 @@ def create_demo() -> gr.Blocks:
165
  api_name="analyze_enhanced"
166
  )
167
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
  # Footer
169
  with gr.Row():
170
  gr.Markdown(
171
  """
172
  **Built by Csaba Bolyós**
173
- [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza)
174
  """
175
  )
176
  return demo
177
-
 
178
  if __name__ == "__main__":
179
  demo = create_demo()
180
  demo.launch(server_name="0.0.0.0",
 
181
  server_port=int(os.getenv("PORT", 7860)),
182
  mcp_server=True)
 
3
  Laban Movement Analysis – modernised Gradio Space
4
  Author: Csaba (BladeSzaSza)
5
  """
 
6
  import gradio as gr
7
  import os
8
+ from pathlib import Path
9
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
10
  from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
11
+ from gradio_overlay_video import OverlayVideo
12
 
13
  # Import agent API if available
14
  # Initialize agent API if available
 
20
  MovementDirection,
21
  MovementIntensity
22
  )
23
+ agent_api = LabanAgentAPI()
24
  HAS_AGENT_API = True
25
+ except Exception as e:
26
+ print(f"Warning: Agent API not available: {e}")
27
+ agent_api = None
 
 
 
 
28
  HAS_AGENT_API = False
29
  # Initialize components
30
  try:
 
57
  error_result = {"error": str(e)}
58
  return error_result, None
59
 
60
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
61
+ """
62
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
63
+
64
+ Args:
65
+ video (str): Path to the video file to be analyzed.
66
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
67
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
68
+
69
+ Returns:
70
+ dict:
71
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
72
+
73
+
74
+ Notes:
75
+ - Visualization is disabled in this standard processing function.
76
+ - If the input video is None, the function returns None.
77
+ - If an error occurs during processing, the returned dictionary contains an "error" key.
78
+ """
79
  if video is None:
80
+ return None
 
81
  try:
82
+ json_output, _ = analyzer.process_video(
83
  video,
84
  model=model,
85
+ enable_visualization=False,
86
  include_keypoints=include_keypoints
87
  )
88
+ return json_output
89
+ except (RuntimeError, ValueError, OSError) as e:
90
+ return {"error": str(e)}
91
+
92
+ def process_video_for_agent(video, model, output_format="summary"):
93
+ """Process video with agent-friendly output format."""
94
+ if not HAS_AGENT_API or agent_api is None:
95
+ return {"error": "Agent API not available"}
96
+
97
+ if not video:
98
+ return {"error": "No video provided"}
99
+
100
+ try:
101
+ model_enum = PoseModel(model)
102
+ result = agent_api.analyze(video, model=model_enum, generate_visualization=False)
103
+
104
+ if output_format == "summary":
105
+ return {"summary": agent_api.get_movement_summary(result)}
106
+ elif output_format == "structured":
107
+ return {
108
+ "success": result.success,
109
+ "direction": result.dominant_direction.value,
110
+ "intensity": result.dominant_intensity.value,
111
+ "speed": result.dominant_speed,
112
+ "fluidity": result.fluidity_score,
113
+ "expansion": result.expansion_score,
114
+ "segments": len(result.movement_segments)
115
+ }
116
+ else: # json
117
+ return result.raw_data
118
  except Exception as e:
119
+ return {"error": str(e)}
120
+
121
+ # Batch processing removed due to MediaPipe compatibility issues
122
+
123
+ # process_standard_for_agent is now imported from backend
124
+
125
+ # Movement filtering removed due to MediaPipe compatibility issues
126
+
127
+ # Import agentic analysis functions from backend
128
+ try:
129
+ from gradio_labanmovementanalysis.agentic_analysis import (
130
+ generate_agentic_analysis,
131
+ process_standard_for_agent
132
+ )
133
+ except ImportError:
134
+ # Fallback if backend module is not available
135
+ def generate_agentic_analysis(json_data, analysis_type, filter_direction="any", filter_intensity="any", filter_min_fluidity=0.0, filter_min_expansion=0.0):
136
+ return {"error": "Agentic analysis backend not available"}
137
+
138
+ def process_standard_for_agent(json_data, output_format="summary"):
139
+ return {"error": "Agent conversion backend not available"}
140
 
141
  # ── 4. Build UI ─────────────────────────────────────────────────
142
  def create_demo() -> gr.Blocks:
 
145
  theme='gstaff/sketch',
146
  fill_width=True,
147
  ) as demo:
148
+ # gr.api(process_video_standard, api_name="process_video")
149
  # ── Hero banner ──
150
  gr.Markdown(
151
  """
152
+ # 🩰 Laban Movement Analysis
153
 
154
  Pose estimation • AI action recognition • Movement Analysis
155
  """
156
  )
157
  with gr.Tabs():
158
  # Tab 1: Standard Analysis
159
+ with gr.Tab("🎭 Standard Analysis"):
160
  gr.Markdown("""
161
  ### Upload a video file to analyze movement using traditional LMA metrics with pose estimation.
162
  """)
 
193
  )
194
 
195
  with gr.Accordion("Analysis Options", open=False):
196
+ enable_viz = gr.Radio([("Create", 1), ("Dismiss", 0)], value=1, label="Visualization")
197
+ include_kp = gr.Radio([("Include", 1), ("Exclude", 0)], value=1, label="Raw Keypoints")
198
 
199
  gr.Examples(
200
  examples=[
 
219
  def process_enhanced_input(file_input, url_input, model, enable_viz, include_keypoints):
220
  """Process either file upload or URL input."""
221
  video_source = file_input if file_input else url_input
222
+ json_result, viz_result = process_video_enhanced(video_source, model, enable_viz, include_keypoints)
223
+ overlay_video.value = (None, json_result)
224
+ return [json_result, viz_result]
225
 
226
  analyze_btn_enh.click(
227
  fn=process_enhanced_input,
 
230
  api_name="analyze_enhanced"
231
  )
232
 
233
+ with gr.Tab("🎬 Overlayed Visualisation"):
234
+ gr.Markdown(
235
+ "# 🩰 Interactive Pose Visualization\n"
236
+ "## See the movement analysis in action with an interactive overlay. "
237
+ "Analyze video @ 🎬 Standard Analysis tab"
238
+ )
239
+ with gr.Row(equal_height=True, min_height=240):
240
+ with gr.Column(scale=1):
241
+ overlay_video = OverlayVideo(
242
+ value=(None, None),  # populated from the Standard Analysis JSON via json_out.change
243
+ autoplay=True,
244
+ interactive=False
245
+ )
246
+
247
+
248
+ # Update overlay when JSON changes
249
+ def update_overlay(json_source):
250
+ """Update overlay video with JSON data from analysis or upload."""
251
+ if json_source:
252
+ return OverlayVideo(value=("", json_source), autoplay=True, interactive=False)
253
+ return OverlayVideo(value=("", None), autoplay=True, interactive=False)
254
+
255
+ # Connect JSON output from analysis to overlay
256
+ json_out.change(
257
+ fn=update_overlay,
258
+ inputs=[json_out],
259
+ outputs=[overlay_video]
260
+ )
261
+
262
+ # Tab 3: Agentic Analysis
263
+ with gr.Tab("🤖 Agentic Analysis"):
264
+ gr.Markdown("""
265
+ ### Intelligent Movement Interpretation
266
+ AI-powered analysis using the processed data from the Standard Analysis tab.
267
+ """)
268
+
269
+ with gr.Row(equal_height=True):
270
+ # Left column - Video display (sourced from first tab)
271
+ with gr.Column(scale=1, min_width=400):
272
+ gr.Markdown("**Source Video** *(from Standard Analysis)*")
273
+ agentic_video_display = gr.Video(
274
+ label="Analyzed Video",
275
+ interactive=False,
276
+ height=350
277
+ )
278
+
279
+ # Model info display (sourced from first tab)
280
+ gr.Markdown("**Model Used** *(from Standard Analysis)*")
281
+ agentic_model_display = gr.Textbox(
282
+ label="Pose Model",
283
+ interactive=False,
284
+ value="No analysis completed yet"
285
+ )
286
+
287
+ # Right column - Analysis options and output
288
+ with gr.Column(scale=1, min_width=400):
289
+ gr.Markdown("**Analysis Type**")
290
+ agentic_analysis_type = gr.Radio(
291
+ choices=[
292
+ ("🎯 SUMMARY", "summary"),
293
+ ("📊 STRUCTURED", "structured"),
294
+ ("🔍 MOVEMENT FILTERS", "movement_filters")
295
+ ],
296
+ value="summary",
297
+ label="Choose Analysis",
298
+ info="Select the type of intelligent analysis"
299
+ )
300
+
301
+ # Movement filters options (shown when movement_filters is selected)
302
+ with gr.Group(visible=False) as movement_filter_options:
303
+ gr.Markdown("**Filter Criteria**")
304
+ filter_direction = gr.Dropdown(
305
+ choices=["any", "up", "down", "left", "right", "forward", "backward", "stationary"],
306
+ value="any",
307
+ label="Dominant Direction"
308
+ )
309
+ filter_intensity = gr.Dropdown(
310
+ choices=["any", "low", "medium", "high"],
311
+ value="any",
312
+ label="Movement Intensity"
313
+ )
314
+ filter_min_fluidity = gr.Slider(0.0, 1.0, 0.0, label="Minimum Fluidity Score")
315
+ filter_min_expansion = gr.Slider(0.0, 1.0, 0.0, label="Minimum Expansion Score")
316
+
317
+ analyze_agentic_btn = gr.Button("🚀 Generate Analysis", variant="primary", size="lg")
318
+
319
+ # Output display
320
+ with gr.Accordion("Analysis Results", open=True):
321
+ agentic_output = gr.JSON(label="Intelligent Analysis Results")
322
+
323
+ # Show/hide movement filter options based on selection
324
+ def toggle_filter_options(analysis_type):
325
+ return gr.Group(visible=(analysis_type == "movement_filters"))
326
+
327
+ agentic_analysis_type.change(
328
+ fn=toggle_filter_options,
329
+ inputs=[agentic_analysis_type],
330
+ outputs=[movement_filter_options]
331
+ )
332
+
333
+ # Update video display when standard analysis completes
334
+ def update_agentic_video_display(video_input, url_input, model):
335
+ """Update agentic tab with video and model from standard analysis."""
336
+ video_source = video_input if video_input else url_input
337
+ return video_source, f"Model: {model}"
338
+
339
+ # Link to standard analysis inputs
340
+ video_in.change(
341
+ fn=update_agentic_video_display,
342
+ inputs=[video_in, url_input_enh, model_sel],
343
+ outputs=[agentic_video_display, agentic_model_display]
344
+ )
345
+
346
+ url_input_enh.change(
347
+ fn=update_agentic_video_display,
348
+ inputs=[video_in, url_input_enh, model_sel],
349
+ outputs=[agentic_video_display, agentic_model_display]
350
+ )
351
+
352
+ model_sel.change(
353
+ fn=update_agentic_video_display,
354
+ inputs=[video_in, url_input_enh, model_sel],
355
+ outputs=[agentic_video_display, agentic_model_display]
356
+ )
357
+
358
+ # Hook up the Generate Analysis button
359
+ def process_agentic_analysis(json_data, analysis_type, filter_direction, filter_intensity, filter_min_fluidity, filter_min_expansion):
360
+ """Process agentic analysis based on user selection."""
361
+ return generate_agentic_analysis(
362
+ json_data,
363
+ analysis_type,
364
+ filter_direction,
365
+ filter_intensity,
366
+ filter_min_fluidity,
367
+ filter_min_expansion
368
+ )
369
+
370
+ analyze_agentic_btn.click(
371
+ fn=process_agentic_analysis,
372
+ inputs=[
373
+ json_out, # JSON data from standard analysis
374
+ agentic_analysis_type,
375
+ filter_direction,
376
+ filter_intensity,
377
+ filter_min_fluidity,
378
+ filter_min_expansion
379
+ ],
380
+ outputs=[agentic_output],
381
+ api_name="analyze_agentic"
382
+ )
383
+
384
+ # Auto-update agentic analysis when JSON changes and analysis type is summary
385
+ def auto_update_summary(json_data, analysis_type):
386
+ """Auto-update with summary when new analysis is available."""
387
+ if json_data and analysis_type == "summary":
388
+ return generate_agentic_analysis(json_data, "summary")
389
+ return None
390
+
391
+ json_out.change(
392
+ fn=auto_update_summary,
393
+ inputs=[json_out, agentic_analysis_type],
394
+ outputs=[agentic_output]
395
+ )
396
+
397
+ # Tab 4: About
398
+ with gr.Tab("ℹ️ About"):
399
+ gr.Markdown("""
400
+ # 🩰 Developer Journey: Laban Movement Analysis
401
+
402
+ ## 🎯 Project Vision
403
+
404
+ Created to bridge the gap between traditional **Laban Movement Analysis (LMA)** principles and modern **AI-powered pose estimation**, this platform represents a comprehensive approach to understanding human movement through technology.
405
+
406
+ ## 🛠️ Technical Architecture
407
+
408
+ ### **Core Foundation**
409
+ - **15 Pose Estimation Models** from diverse sources and frameworks
410
+ - **Multi-format Video Processing** with URL support (YouTube, Vimeo, direct links)
411
+ - **Real-time Analysis Pipeline** with configurable model selection
412
+ - **MCP-Compatible API** for AI agent integration
413
+
414
+ ### **Pose Model Ecosystem**
415
+ ```
416
+ 📊 MediaPipe Family (Google) → 3 variants (lite/full/heavy)
417
+ ⚡ MoveNet Family (TensorFlow) → 2 variants (lightning/thunder)
418
+ 🎯 YOLO v8 Family (Ultralytics) → 5 variants (n/s/m/l/x)
419
+ 🔥 YOLO v11 Family (Ultralytics)→ 5 variants (n/s/m/l/x)
420
+ ```
421
+
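+ In code, a model is selected simply by passing its identifier to `process_video`. A minimal sketch, assuming a default constructor (identifiers follow the family/variant naming used elsewhere in this repo, e.g. `"mediapipe-full"` or `"movenet-thunder"`; the local video path is a placeholder):
+ 
+ ```python
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
+ 
+ analyzer = LabanMovementAnalysis()  # default configuration (assumed)
+ 
+ # Swap the model string to trade speed for accuracy.
+ json_result, overlay_video = analyzer.process_video(
+     "dance_clip.mp4",          # placeholder path to a local video
+     model="movenet-thunder",
+     enable_visualization=True,
+     include_keypoints=False,
+ )
+ ```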
422
+ ## 🎨 Innovation Highlights
423
+
424
+ ### **1. Custom Gradio Component: `gradio_overlay_video`**
425
+ - **Layered Visualization**: Controlled overlay of pose data on original video
426
+ - **Interactive Controls**: Frame-by-frame analysis with movement metrics
427
+ - **Synchronized Playback**: Real-time correlation between video and data
428
+
429
+ ### **2. Agentic Analysis Engine**
430
+ Beyond raw pose detection, we've developed intelligent interpretation layers:
431
+
432
+ - **🎯 SUMMARY**: Narrative movement interpretation with temporal pattern analysis
433
+ - **📊 STRUCTURED**: Comprehensive quantitative breakdowns with statistical insights
434
+ - **🔍 MOVEMENT FILTERS**: Advanced pattern detection with customizable criteria
435
+
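+ These analyses are also exposed as a callable endpoint (`api_name="analyze_agentic"`). A hedged `gradio_client` sketch — the Space id below is a placeholder, and `analysis_json` stands in for the JSON produced by the Standard Analysis tab:
+ 
+ ```python
+ from gradio_client import Client
+ 
+ client = Client("BladeSzaSza/laban-movement-analysis")  # placeholder Space id
+ 
+ analysis_json = {}  # replace with the JSON output of a Standard Analysis run
+ 
+ result = client.predict(
+     analysis_json,
+     "movement_filters",   # "summary" | "structured" | "movement_filters"
+     "up",                 # dominant direction filter
+     "high",               # movement intensity filter
+     0.5,                  # minimum fluidity score
+     0.0,                  # minimum expansion score
+     api_name="/analyze_agentic",
+ )
+ print(result)
+ ```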
436
+ ### **3. Temporal Pattern Recognition**
437
+ - **Movement Consistency Tracking**: Direction and intensity variation analysis
438
+ - **Complexity Scoring**: Multi-dimensional movement sophistication metrics
439
+ - **Sequence Detection**: Continuous movement pattern identification
440
+ - **Laban Integration**: Professional movement quality assessment using LMA principles
441
+
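+ As a purely illustrative sketch (not the project's actual algorithm), a consistency-style score can be derived from per-frame direction labels:
+ 
+ ```python
+ from collections import Counter
+ 
+ def direction_consistency(frame_directions: list[str]) -> float:
+     # Illustrative only: fraction of frames sharing the most common direction.
+     if not frame_directions:
+         return 0.0
+     top_count = Counter(frame_directions).most_common(1)[0][1]
+     return top_count / len(frame_directions)
+ 
+ # Mostly upward movement with one reversal -> 0.8
+ print(direction_consistency(["up", "up", "up", "down", "up"]))
+ ```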
442
+ ## 📈 Processing Pipeline
443
+
444
+ ```
445
+ Video Input → Pose Detection → LMA Analysis → JSON Output
446
+ ↓ ↓ ↓ ↓
447
+ URL/Upload → 15 Models → Temporal → Visualization
448
+ ↓ ↓ Patterns ↓
449
+ Preprocessing → Keypoints → Metrics → Agentic Analysis
450
+ ```
451
+
452
+ ## 🎭 Laban Movement Analysis Integration
453
+
454
+ Our implementation translates raw pose coordinates into meaningful movement qualities:
455
+
456
+ - **Effort Qualities**: Intensity, speed, and flow characteristics
457
+ - **Space Usage**: Expansion patterns and directional preferences
458
+ - **Temporal Dynamics**: Rhythm, acceleration, and movement consistency
459
+ - **Quality Assessment**: Fluidity scores and movement sophistication
460
+
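+ For intuition only (the real features and thresholds are richer than this), intensity can be pictured as banding a speed statistic:
+ 
+ ```python
+ import numpy as np
+ 
+ def intensity_band(joint_speeds: np.ndarray) -> str:
+     # Illustrative mapping from mean joint speed to an intensity label.
+     # Thresholds are invented for the example, not the project's values.
+     mean_speed = float(np.mean(joint_speeds))
+     if mean_speed < 0.02:
+         return "low"
+     if mean_speed < 0.08:
+         return "medium"
+     return "high"
+ 
+ print(intensity_band(np.array([0.01, 0.03, 0.12])))  # "medium"
+ ```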
461
+ ## 🔬 Technical Achievements
462
+
463
+ ### **Multi-Source Model Integration**
464
+ Successfully unified models from different frameworks:
465
+ - Google's MediaPipe (BlazePose architecture)
466
+ - TensorFlow's MoveNet (lightweight and accurate variants)
467
+ - Ultralytics' YOLO ecosystem (object detection adapted for pose)
468
+
469
+ ### **Real-Time Processing Capabilities**
470
+ - **Streaming Support**: Frame-by-frame processing with temporal continuity
471
+ - **Memory Optimization**: Efficient handling of large video files
472
+ - **Error Recovery**: Graceful handling of pose detection failures
473
+
474
+ ### **Agent-Ready Architecture**
475
+ - **MCP Server Integration**: Compatible with AI agent workflows
476
+ - **Structured API**: RESTful endpoints for programmatic access
477
+ - **Flexible Output Formats**: JSON, visualization videos, and metadata
478
+
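+ Because the app is launched with `mcp_server=True`, the same Blocks interface also serves its endpoints for programmatic and agent use. A quick way to inspect what is exposed (the URL is a placeholder):
+ 
+ ```python
+ from gradio_client import Client
+ 
+ client = Client("http://localhost:7860")  # or the deployed Space URL
+ client.view_api()  # lists the callable endpoints, e.g. /analyze_agentic
+ ```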
479
+ ## 🌟 Future Roadmap
480
+
481
+ - **3D Pose Integration**: Depth-aware movement analysis
482
+ - **Multi-Person Tracking**: Ensemble and group movement dynamics
483
+ - **Real-Time Streaming**: Live movement analysis capabilities
484
+ - **Machine Learning Enhancement**: Custom models trained on movement data
485
+
486
+ ## 🔧 Built With
487
+
488
+ - **Frontend**: Gradio 5.33+ with custom Svelte components
489
+ - **Backend**: Python with FastAPI and async processing
490
+ - **Computer Vision**: MediaPipe, TensorFlow, PyTorch, Ultralytics
491
+ - **Analysis**: NumPy, OpenCV, custom Laban algorithms
492
+ - **Deployment**: Hugging Face Spaces with Docker support
493
+
494
+ ---
495
+
496
+ ### 👨‍💻 Created by **Csaba Bolyós**
497
+
498
+ *Combining classical movement analysis with cutting-edge AI to unlock new possibilities in human movement understanding.*
499
+
500
+ **Connect:**
501
+ [GitHub](https://github.com/bladeszasza) • [Hugging Face](https://huggingface.co/BladeSzaSza) • [LinkedIn](https://www.linkedin.com/in/csaba-bolyós-00a11767/)
502
+
503
+ ---
504
+
505
+ > *"Movement is a language. Technology helps us understand what the body is saying."*
506
+ """)
507
+
508
  # Footer
509
  with gr.Row():
510
  gr.Markdown(
511
  """
512
  **Built by Csaba Bolyós**
513
+ [GitHub](https://github.com/bladeszasza) • [HF](https://huggingface.co/BladeSzaSza) • [LinkedIn](https://www.linkedin.com/in/csaba-bolyós-00a11767/)
514
  """
515
  )
516
  return demo
517
+
518
+
519
  if __name__ == "__main__":
520
  demo = create_demo()
521
  demo.launch(server_name="0.0.0.0",
522
+ share=True,
523
  server_port=int(os.getenv("PORT", 7860)),
524
  mcp_server=True)
src/demo/space.py CHANGED
@@ -46,8 +46,8 @@ Author: Csaba (BladeSzaSza)
46
 
47
  import gradio as gr
48
  import os
49
- from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
- # from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
@@ -59,15 +59,10 @@ try:
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
- HAS_AGENT_API = True
63
-
64
- try:
65
- agent_api = LabanAgentAPI()
66
- except Exception as e:
67
- print(f"Warning: Agent API not available: {e}")
68
- agent_api = None
69
- except ImportError:
70
- HAS_AGENT_API = False
71
  # Initialize components
72
  try:
73
  analyzer = LabanMovementAnalysis(
@@ -99,21 +94,37 @@ def process_video_enhanced(video_input, model, enable_viz, include_keypoints):
99
  error_result = {"error": str(e)}
100
  return error_result, None
101
 
102
- def process_video_standard(video, model, enable_viz, include_keypoints):
103
- \"\"\"Standard video processing function.\"\"\"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  if video is None:
105
- return None, None
106
-
107
  try:
108
- json_output, video_output = analyzer.process_video(
109
  video,
110
  model=model,
111
- enable_visualization=enable_viz,
112
  include_keypoints=include_keypoints
113
  )
114
- return json_output, video_output
115
- except Exception as e:
116
- return {"error": str(e)}, None
117
 
118
  # ── 4. Build UI ─────────────────────────────────────────────────
119
  def create_demo() -> gr.Blocks:
@@ -122,7 +133,7 @@ def create_demo() -> gr.Blocks:
122
  theme='gstaff/sketch',
123
  fill_width=True,
124
  ) as demo:
125
-
126
  # ── Hero banner ──
127
  gr.Markdown(
128
  \"\"\"
@@ -214,10 +225,12 @@ def create_demo() -> gr.Blocks:
214
  \"\"\"
215
  )
216
  return demo
217
-
 
218
  if __name__ == "__main__":
219
  demo = create_demo()
220
  demo.launch(server_name="0.0.0.0",
 
221
  server_port=int(os.getenv("PORT", 7860)),
222
  mcp_server=True)
223
 
 
46
 
47
  import gradio as gr
48
  import os
49
+ # from backend.gradio_labanmovementanalysis import LabanMovementAnalysis
50
+ from gradio_labanmovementanalysis import LabanMovementAnalysis
51
 
52
  # Import agent API if available
53
  # Initialize agent API if available
 
59
  MovementDirection,
60
  MovementIntensity
61
  )
62
+ agent_api = LabanAgentAPI()
63
+ except Exception as e:
64
+ print(f"Warning: Agent API not available: {e}")
65
+ agent_api = None
 
 
 
 
 
66
  # Initialize components
67
  try:
68
  analyzer = LabanMovementAnalysis(
 
94
  error_result = {"error": str(e)}
95
  return error_result, None
96
 
97
+ def process_video_standard(video : str, model : str, include_keypoints : bool) -> dict:
98
+ \"\"\"
99
+ Processes a video file using the specified pose estimation model and returns movement analysis results.
100
+
101
+ Args:
102
+ video (str): Path to the video file to be analyzed.
103
+ model (str): The name of the pose estimation model to use (e.g., "mediapipe-full", "movenet-thunder", etc.).
104
+ include_keypoints (bool): Whether to include raw keypoint data in the output.
105
+
106
+ Returns:
107
+ dict:
108
+ - A dictionary containing the movement analysis results in JSON format, or an error message if processing fails.
109
+
110
+
111
+ Notes:
112
+ - Visualization is disabled in this standard processing function.
113
+ - If the input video is None, both return values will be None.
114
+ - If an error occurs during processing, the first return value will be a dictionary with an "error" key.
115
+ \"\"\"
116
  if video is None:
117
+ return None
 
118
  try:
119
+ json_output, _ = analyzer.process_video(
120
  video,
121
  model=model,
122
+ enable_visualization=False,
123
  include_keypoints=include_keypoints
124
  )
125
+ return json_output
126
+ except (RuntimeError, ValueError, OSError) as e:
127
+ return {"error": str(e)}
128
 
129
  # ── 4. Build UI ─────────────────────────────────────────────────
130
  def create_demo() -> gr.Blocks:
 
133
  theme='gstaff/sketch',
134
  fill_width=True,
135
  ) as demo:
136
+ gr.api(process_video_standard, api_name="process_video")
137
  # ── Hero banner ──
138
  gr.Markdown(
139
  \"\"\"
 
225
  \"\"\"
226
  )
227
  return demo
228
+
229
+
230
  if __name__ == "__main__":
231
  demo = create_demo()
232
  demo.launch(server_name="0.0.0.0",
233
+ share=True,
234
  server_port=int(os.getenv("PORT", 7860)),
235
  mcp_server=True)
236
 
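The `gr.api(process_video_standard, api_name="process_video")` registration above exposes the analysis as a named endpoint. A hedged `gradio_client` sketch (URL and file path are placeholders):

```python
from gradio_client import Client

client = Client("http://localhost:7860")  # or the deployed Space URL

result = client.predict(
    "path/to/video.mp4",   # placeholder path to a video file
    "mediapipe-full",      # pose estimation model
    False,                 # include_keypoints
    api_name="/process_video",
)
print(result)
```
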
src/examples/mediapipe_full.json ADDED
The diff for this file is too large to render. See raw diff
 
src/pyproject.toml CHANGED
@@ -8,8 +8,8 @@ build-backend = "hatchling.build"
8
 
9
  [project]
10
  name = "gradio_labanmovementanalysis"
11
- version = "0.0.5"
12
- description = "A Gradio 5 component for video movement analysis using Laban Movement Analysis (LMA) with MCP support for AI agents"
13
  readme = "README.md"
14
  license = "apache-2.0"
15
  authors = [{ name = "Csaba Bolyós", email = "bladeszasza@gmail.com" }]
@@ -24,7 +24,8 @@ dependencies = [
24
  "ultralytics>=8.0.0",
25
  "tensorflow>=2.8.0",
26
  "tensorflow-hub>=0.12.0",
27
- "yt-dlp>=2025.05.22"
 
28
  ]
29
 
30
  [project.optional-dependencies]
 
8
 
9
  [project]
10
  name = "gradio_labanmovementanalysis"
11
+ version = "0.0.7"
12
+ description = "Video movement analysis using Laban Movement Analysis (LMA)"
13
  readme = "README.md"
14
  license = "apache-2.0"
15
  authors = [{ name = "Csaba Bolyós", email = "bladeszasza@gmail.com" }]
 
24
  "ultralytics>=8.0.0",
25
  "tensorflow>=2.8.0",
26
  "tensorflow-hub>=0.12.0",
27
+ "yt-dlp>=2025.05.22",
28
+ "gradio_overlay_video>=0.0.9"
29
  ]
30
 
31
  [project.optional-dependencies]
src/requirements.txt CHANGED
@@ -5,4 +5,5 @@
5
  ultralytics>=8.0.0,
6
  tensorflow>=2.8.0,
7
  tensorflow-hub>=0.12.0,
8
- yt-dlp>=2025.05.22
 
 
5
  ultralytics>=8.0.0,
6
  tensorflow>=2.8.0,
7
  tensorflow-hub>=0.12.0,
8
+ yt-dlp>=2025.05.22,
9
+ gradio_overlay_video>=0.0.9