upload inference script
inference_matanyone_api.py
ADDED
@@ -0,0 +1,180 @@
import os
import cv2
import tqdm
import random
import imageio
import numpy as np
from PIL import Image

import torch
import torchvision
import torch.nn.functional as F

from matanyone.model.matanyone import MatAnyone
from matanyone.inference.inference_core import InferenceCore

import warnings
warnings.filterwarnings("ignore")

IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG')
VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi', '.MP4', '.MOV', '.AVI')
def read_frame_from_videos(frame_root):
    if frame_root.endswith(VIDEO_EXTENSIONS):  # video file path
        video_name = os.path.basename(frame_root)[:-4]
        frames, _, info = torchvision.io.read_video(filename=frame_root, pts_unit='sec', output_format='TCHW')  # RGB
        fps = info['video_fps']
    else:  # folder of frames
        video_name = os.path.basename(frame_root)
        frames = []
        fr_lst = sorted(os.listdir(frame_root))
        for fr in fr_lst:
            frame = cv2.imread(os.path.join(frame_root, fr))[..., [2, 1, 0]]  # BGR -> RGB, HWC
            frames.append(frame)
        fps = 24  # default
        frames = torch.Tensor(np.array(frames)).permute(0, 3, 1, 2).contiguous()  # TCHW

    length = frames.shape[0]

    return frames, fps, length, video_name
def gen_dilate(alpha, min_kernel_size, max_kernel_size):
    kernel_size = random.randint(min_kernel_size, max_kernel_size)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    fg_and_unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
    dilate = cv2.dilate(fg_and_unknown, kernel, iterations=1) * 255
    return dilate.astype(np.float32)
def gen_erosion(alpha, min_kernel_size, max_kernel_size):
    kernel_size = random.randint(min_kernel_size, max_kernel_size)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernel_size, kernel_size))
    fg = np.array(np.equal(alpha, 255).astype(np.float32))
    erode = cv2.erode(fg, kernel, iterations=1) * 255
    return erode.astype(np.float32)
@torch.inference_mode()
@torch.cuda.amp.autocast()
def main(input_path, mask_path, output_path, ckpt_path, n_warmup=10, r_erode=10, r_dilate=10, suffix="", save_image=False, max_size=-1):

    # note: ckpt_path is kept for CLI compatibility but not used here; weights are loaded from the Hub
    matanyone = MatAnyone.from_pretrained("PeiqingYang/MatAnyone").cuda().eval()
    processor = InferenceCore(matanyone, cfg=matanyone.cfg)

    # inference parameters
    r_erode = int(r_erode)
    r_dilate = int(r_dilate)
    n_warmup = int(n_warmup)
    max_size = int(max_size)

    # load input frames
    vframes, fps, length, video_name = read_frame_from_videos(input_path)
    repeated_frames = vframes[0].unsqueeze(0).repeat(n_warmup, 1, 1, 1)  # repeat the first frame for warmup
    vframes = torch.cat([repeated_frames, vframes], dim=0).float()
    length += n_warmup  # update length

    # downsample if the short side exceeds max_size
    new_h, new_w = None, None
    if max_size > 0:
        h, w = vframes.shape[-2:]
        min_side = min(h, w)
        if min_side > max_size:
            new_h = int(h / min_side * max_size)
            new_w = int(w / min_side * max_size)
            vframes = F.interpolate(vframes, size=(new_h, new_w), mode="area")

    # set output paths
    os.makedirs(output_path, exist_ok=True)
    if suffix != "":
        video_name = f'{video_name}_{suffix}'
    if save_image:
        os.makedirs(f'{output_path}/{video_name}', exist_ok=True)
        os.makedirs(f'{output_path}/{video_name}/pha', exist_ok=True)
        os.makedirs(f'{output_path}/{video_name}/fgr', exist_ok=True)

    # load the first-frame mask
    mask = Image.open(mask_path).convert('L')
    mask = np.array(mask)

    bgr = (np.array([120, 255, 155], dtype=np.float32) / 255).reshape((1, 1, 3))  # green screen to paste fgr
    objects = [1]

    # [optional] erode & dilate the input mask
    if r_dilate > 0:
        mask = gen_dilate(mask, r_dilate, r_dilate)
    if r_erode > 0:
        mask = gen_erosion(mask, r_erode, r_erode)

    mask = torch.from_numpy(mask).cuda()

    if new_h is not None:  # resize the mask to match the downsampled frames
        mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0), size=(new_h, new_w), mode="nearest")
        mask = mask[0, 0]

    # inference start
    phas = []
    fgrs = []
    for ti in tqdm.tqdm(range(length)):
        # load the image as RGB; normalization is done within the model
        image = vframes[ti]

        image_np = np.array(image.permute(1, 2, 0))  # for output visualization
        image = (image / 255.).cuda().float()  # for network input

        if ti == 0:
            output_prob = processor.step(image, mask, objects=objects)  # encode the given mask
            output_prob = processor.step(image, first_frame_pred=True)  # first frame for prediction
        else:
            if ti <= n_warmup:
                output_prob = processor.step(image, first_frame_pred=True)  # reinit as the first frame for prediction
            else:
                output_prob = processor.step(image)

        # convert output probabilities to an alpha matte
        mask = processor.output_prob_to_mask(output_prob)

        # visualize prediction
        pha = mask.unsqueeze(2).cpu().numpy()
        com_np = image_np / 255. * pha + bgr * (1 - pha)

        # do NOT save the warmup frames
        if ti > (n_warmup - 1):
            com_np = (com_np * 255).astype(np.uint8)
            pha = (pha * 255).astype(np.uint8)
            fgrs.append(com_np)
            phas.append(pha)
            if save_image:
                cv2.imwrite(f'{output_path}/{video_name}/pha/{str(ti - n_warmup).zfill(5)}.png', pha)
                cv2.imwrite(f'{output_path}/{video_name}/fgr/{str(ti - n_warmup).zfill(5)}.png', com_np[..., [2, 1, 0]])

    phas = np.array(phas)
    fgrs = np.array(fgrs)

    imageio.mimwrite(f'{output_path}/{video_name}_fgr.mp4', fgrs, fps=fps, quality=7)
    imageio.mimwrite(f'{output_path}/{video_name}_pha.mp4', phas, fps=fps, quality=7)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path', type=str, default="inputs/video/test-sample1.mp4", help='Path of the input video or frame folder.')
    parser.add_argument('-m', '--mask_path', type=str, default="inputs/mask/test-sample1.png", help='Path of the first-frame segmentation mask.')
    parser.add_argument('-o', '--output_path', type=str, default="results/", help='Output folder. Default: results')
    parser.add_argument('-c', '--ckpt_path', type=str, default="pretrained_models/matanyone.pth", help='Path of the MatAnyone model.')
    parser.add_argument('-w', '--warmup', type=str, default="10", help='Number of warmup iterations for the first-frame alpha prediction.')
    parser.add_argument('-e', '--erode_kernel', type=str, default="10", help='Erosion kernel size applied to the input mask.')
    parser.add_argument('-d', '--dilate_kernel', type=str, default="10", help='Dilation kernel size applied to the input mask.')
    parser.add_argument('--suffix', type=str, default="", help='Suffix to distinguish different targets when saving, e.g., target1.')
    parser.add_argument('--save_image', action='store_true', default=False, help='Save output frames. Default: False')
    parser.add_argument('--max_size', type=str, default="-1", help='When positive, the video is downsampled if min(w, h) exceeds this value. Default: -1 (no limit)')

    args = parser.parse_args()

    main(input_path=args.input_path,
         mask_path=args.mask_path,
         output_path=args.output_path,
         ckpt_path=args.ckpt_path,
         n_warmup=args.warmup,
         r_erode=args.erode_kernel,
         r_dilate=args.dilate_kernel,
         suffix=args.suffix,
         save_image=args.save_image,
         max_size=args.max_size)
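
For reference, the script can be driven either through the CLI flags defined above or directly from Python. Below is a minimal usage sketch of the programmatic route; the paths simply mirror the argparse defaults and are placeholders for your own files, and importing `inference_matanyone_api` assumes the script sits on your Python path, so treat the whole snippet as an illustration rather than a shipped entry point.

# Minimal usage sketch (assumptions: a CUDA device is available and the
# input video / first-frame mask paths below exist locally).
from inference_matanyone_api import main

main(
    input_path="inputs/video/test-sample1.mp4",   # video file or a folder of frames
    mask_path="inputs/mask/test-sample1.png",     # first-frame segmentation mask
    output_path="results",
    ckpt_path="pretrained_models/matanyone.pth",  # kept for CLI parity; weights load from the Hub
    n_warmup=10,
    r_erode=10,
    r_dilate=10,
    suffix="target1",
    save_image=True,   # also dump per-frame pha/ and fgr/ PNGs
    max_size=1080,     # downsample when min(w, h) > 1080
)

With these arguments the run would write test-sample1_target1_fgr.mp4 (foreground composited on green) and test-sample1_target1_pha.mp4 (alpha matte) under results/, matching the two imageio.mimwrite calls at the end of main().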