Spaces: Running on Zero

xiaoyuxi committed · Commit 8c76a8d · 1 Parent(s): 4138a21

gradio_app

Files changed:
- _viz/viz_template.html +6 -6
- app.py +2 -2
_viz/viz_template.html CHANGED

@@ -755,9 +755,9 @@
     pointSize: 0.03,
     pointOpacity: 1.0,
     showTrajectory: true,
-    trajectoryLineWidth:
-    trajectoryBallSize: 0.
-    trajectoryHistory:
+    trajectoryLineWidth: 2.5,
+    trajectoryBallSize: 0.015,
+    trajectoryHistory: 0,
     showCameraFrustum: true,
     frustumSize: 0.2
 };

@@ -805,9 +805,9 @@
     pointSize: 0.03,
     pointOpacity: 1.0,
     showTrajectory: true,
-    trajectoryLineWidth:
-    trajectoryBallSize: 0.
-    trajectoryHistory:
+    trajectoryLineWidth: 2.5,
+    trajectoryBallSize: 0.015,
+    trajectoryHistory: 0,
     showCameraFrustum: true,
     frustumSize: 0.2
 };
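For reference, both edited blocks in the template set the same visualization defaults: a trajectory line width of 2.5, a trajectory marker size of 0.015, and a trajectoryHistory of 0. The template itself stores these as a JavaScript settings object; the sketch below only mirrors the post-commit values as a Python dict for quick reference, with key names taken from the diff and the untouched keys from the surrounding context lines.

import copy

# Post-commit viewer defaults as they appear in _viz/viz_template.html,
# mirrored as a Python dict (the template defines them in JavaScript).
VIEWER_DEFAULTS = {
    "pointSize": 0.03,
    "pointOpacity": 1.0,
    "showTrajectory": True,
    "trajectoryLineWidth": 2.5,   # value set by this commit
    "trajectoryBallSize": 0.015,  # value set by this commit
    "trajectoryHistory": 0,       # value set by this commit
    "showCameraFrustum": True,
    "frustumSize": 0.2,
}

# Hypothetical usage: start from the defaults and override per visualization.
settings = copy.deepcopy(VIEWER_DEFAULTS)
settings["pointSize"] = 0.05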
app.py CHANGED

@@ -164,6 +164,7 @@ def gpu_run_tracker(tracker_model_arg, tracker_viser_arg, temp_dir, video_name,
         mask = mask.sum(axis=-1)>0
     else:
         mask = np.ones_like(video_tensor[0,0].cpu().numpy())>0
+    grid_size = 10
 
     # Get frame dimensions and create grid points
     frame_H, frame_W = video_tensor.shape[2:]

@@ -177,7 +178,6 @@ def gpu_run_tracker(tracker_model_arg, tracker_viser_arg, temp_dir, video_name,
 
     query_xyt = torch.cat([torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2)[0].cpu().numpy()
     print(f"Query points shape: {query_xyt.shape}")
-
     # Run model inference
     with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
         (

@@ -571,7 +571,7 @@ def launch_viz(grid_size, vo_points, fps, original_image_state):
 
     mask_files = glob.glob(os.path.join(temp_dir, "*.png"))
    if not mask_files:
-
+        mask_files = [None]
 
     video_files = glob.glob(os.path.join(temp_dir, "*.mp4"))
     if not video_files:
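The first two app.py hunks hard-code grid_size = 10 inside gpu_run_tracker and build query_xyt by prepending a zero time column to grid_pts. The sketch below illustrates that pattern under stated assumptions: it assumes grid_pts is a (1, N, 2) tensor of (x, y) pixel coordinates spread over the frame, and the meshgrid construction of grid_pts is an illustration only; the actual helper app.py uses to create grid_pts is not shown in this diff.

import numpy as np
import torch

def make_query_points(frame_H: int, frame_W: int, grid_size: int = 10) -> np.ndarray:
    # Spread a grid_size x grid_size grid of (x, y) points over the frame.
    # This construction is an assumption; only the torch.cat line below
    # matches the diff verbatim.
    ys = torch.linspace(0, frame_H - 1, grid_size)
    xs = torch.linspace(0, frame_W - 1, grid_size)
    gy, gx = torch.meshgrid(ys, xs, indexing="ij")
    grid_pts = torch.stack([gx, gy], dim=-1).reshape(1, -1, 2)  # (1, N, 2)

    # Same pattern as the hunk above: prepend a zero time index so each
    # query becomes (t=0, x, y), drop the batch dim, and move to numpy.
    query_xyt = torch.cat(
        [torch.zeros_like(grid_pts[:, :, :1]), grid_pts], dim=2
    )[0].cpu().numpy()
    return query_xyt  # shape (grid_size * grid_size, 3)

# Example: a 10x10 grid on a 480x640 frame yields 100 queries of (t, x, y).
# make_query_points(480, 640, grid_size=10).shape -> (100, 3)

The launch_viz hunk adds a fallback so the function still has something to hand downstream when no PNG mask exists in temp_dir. A condensed sketch of that pattern, assuming downstream code treats None as "no mask" (its handling is not part of this diff):

import glob
import os

def find_mask(temp_dir: str):
    # Mirror of the fallback added in launch_viz: glob for PNG masks and,
    # if none exist, substitute a single None entry so callers can always
    # index mask_files[0].
    mask_files = glob.glob(os.path.join(temp_dir, "*.png"))
    if not mask_files:
        mask_files = [None]
    return mask_files[0]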