import cv2
import numpy as np
import mediapipe as mp
import gradio as gr
import tempfile

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Draws the detected pose landmarks as a stick figure on a copy of the frame
def draw_pose_landmarks(image, results):
    annotated_image = image.copy()
    if results.pose_landmarks:
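        # DrawingSpec colors are BGR to match the OpenCV frame:
        # landmarks in green, skeleton connections in blue.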
        mp_drawing.draw_landmarks(
            annotated_image,
            results.pose_landmarks,
            mp_pose.POSE_CONNECTIONS,
            landmark_drawing_spec=mp_drawing.DrawingSpec(color=(0,255,0), thickness=2, circle_radius=2),
            connection_drawing_spec=mp_drawing.DrawingSpec(color=(255,0,0), thickness=2)
        )
    return annotated_image

# Main pipeline: video -> pose detection -> stick-figure overlay -> write new video
def process_video(video_file):
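    # Reserve a temporary .mp4 path for the output; the handle is closed as
    # soon as the `with` block exits, so VideoWriter can open the same path.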
    with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_output:
        output_path = temp_output.name

    cap = cv2.VideoCapture(video_file)
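    # Added defensive check: fail fast with a user-visible message if OpenCV
    # cannot open the uploaded file (gr.Error shows the text in the Gradio UI).
    if not cap.isOpened():
        raise gr.Error("Could not open the uploaded video.")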
    width  = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps    = cap.get(cv2.CAP_PROP_FPS)
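
    # Added fallback: some containers report 0 FPS, which would produce an
    # unplayable output file; assume a common default of 30 FPS in that case.
    if fps <= 0:
        fps = 30.0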

    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))
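    # Note: 'mp4v' (MPEG-4 Part 2, used above) may not play back in every
    # browser; if the Gradio player shows a blank video, re-encoding the
    # result to H.264 (e.g. with ffmpeg) is a common workaround.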

    with mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Convert BGR (OpenCV) to RGB, which MediaPipe expects
            image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = pose.process(image_rgb)

            # Draw the stick figure onto the original BGR frame
            annotated_frame = draw_pose_landmarks(frame, results)
            out.write(annotated_frame)

    cap.release()
    out.release()

    return output_path
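
# Quick local test without launching the Gradio UI (sketch; assumes a file
# "sample.mp4" exists next to this script -- replace with any video path):
#
#   annotated = process_video("sample.mp4")
#   print(f"Annotated video written to {annotated}")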

# Gradio interface
interface = gr.Interface(
    fn=process_video,
    inputs=gr.Video(label="Upload a video"),
    outputs=gr.Video(label="Video with detected pose"),
    title="Stick-Figure Pose Detection with MediaPipe",
    description="Runs MediaPipe pose detection on the uploaded video and draws a stick figure on each frame."
)

if __name__ == "__main__":
    interface.launch()