Commit a2bc65a · 1 parent: ec30fee
Code refactoring, cancel buttons, multiple result views
Files changed:
- annotation_editor.js +6 -3
- annotation_handler.py +18 -18
- app.py +163 -176
- aris.py +8 -23
- gradio_components/result_ui.py +117 -0
- gradio_components/upload_ui.py +30 -0
- temp.mp4 +0 -0
- uploader.py +10 -3
annotation_editor.js
CHANGED
@@ -14,6 +14,8 @@
 window.TR = 1;
 window.BL = 2;
 window.BR = 3;
+
+window.frame_index = 0;
 
 
 window.init = () => {
@@ -25,7 +27,7 @@
 
     show_frame();
 }
-
+
 window.prev_frame = () => {
     window.frame_index = Math.max(window.frame_index - 1, 0);
     show_frame();
@@ -106,8 +108,9 @@
     prettify_annotation();
 }
 color_from_id = (id) => {
-
-
+    //hue = Math.floor((number * 137.508 + 60) % 360)
+    power = Math.pow(2, Math.ceil(Math.log2(id)));
+    hue = (2*id - power - 1) / power;
     return 'hsl(' + Math.floor(hue*359) + ', 100%, 50%)'
 }
 
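Note on the new color_from_id(): successive ids bisect the hue circle, so neighbouring ids get well-separated colors (id 1 → hue 0.0, 2 → 0.5, 3 → 0.25, 4 → 0.75, 5 → 0.125, ...). Below is a small Python transcription of the same formula, for illustration only; the committed code is the JavaScript above, and this sketch assumes ids start at 1.

import math

def color_from_id(fish_id: int) -> str:
    # Smallest power of two >= fish_id; each new "generation" of ids
    # fills in the midpoints between the hues already handed out.
    power = 2 ** math.ceil(math.log2(fish_id))
    hue = (2 * fish_id - power - 1) / power
    return f"hsl({math.floor(hue * 359)}, 100%, 50%)"

for i in range(1, 9):
    print(i, color_from_id(i))  # hsl hues: 0, 179, 89, 269, 44, 134, 224, 314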
annotation_handler.py
CHANGED
@@ -6,27 +6,28 @@ import base64
 
 VIDEO_HEIGHT = 700
 
-def load_frames(
+def load_frames(video, preds):
     """Load frames for annotation editing
+
+
+    Returns:
+        list({
+            frame: frame image as base64 string,
+            annotations: list(
+                bbox: dict of int defining bounding box {left, right, top, bottom},
+                id: id of fish as int,
+                conf: confidence in bbox as float
+            )
+        })
     """
+    if type(video) == str:
 
-
+        dataloader, dataset = create_dataloader_aris(video, BEAM_WIDTH_DIR, None)
+        frames = dataset.didson.load_frames(start_frame=0)
+    else:
+        frames = video
 
-    didson = dataset.didson
-
-    frames = didson.load_frames(start_frame=0)
-    frame_info, h, w = get_frame_info(frames, preds)
-
-    return frame_info
 
-def get_frame_info(frames, preds):
-    """Get visualized video frames ready for output, given raw ARIS/DIDSON frames.
-    Warning: all frames in frames will be stored in memory - careful of OOM errors. Consider processing large files
-    in batches, such as in generate_video_batches()
-
-    Returns:
-        list(np.ndarray), height (int), width (int)
-    """
     color_map = { fish['id'] : fish['color'] for fish in preds['fish'] }
 
     frame_info = []
@@ -52,7 +53,6 @@ def get_frame_info(frames, preds):
         }
         for fish in preds['frames'][i]['fish']:
             xmin, ymin, xmax, ymax = fish['bbox']
-            hexx = color_map[fish['fish_id']].lstrip('#')
             frame['annotations'].append({
                 'bbox': {
                     'left': int(round(xmin * w)),
@@ -65,4 +65,4 @@ def get_frame_info(frames, preds):
             })
         frame_info.append(frame)
 
-    return frame_info
+    return frame_info
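For reference, the structure that the refactored load_frames() documents and returns looks like the sketch below. Values are made up; bounding boxes are in pixels of the rendered frame, and ids and confidences come from the prediction JSON.

example_frame_info = [
    {
        "frame": "<base64-encoded frame image>",
        "annotations": [
            {
                "bbox": {"left": 12, "right": 40, "top": 88, "bottom": 120},
                "id": 3,
                "conf": 0.87,
            },
        ],
    },
]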
app.py
CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from uploader import 
+from uploader import save_data_to_dir, create_data_dir, save_data
 from main import predict_task
 from state_handler import load_example_result, reset_state
 from visualizer import is_fourcc_available
@@ -14,40 +14,9 @@ from annotation_handler import load_frames
 import json
 from zipfile import ZipFile
 import os
+from gradio_components.upload_ui import Upload_Gradio
+from gradio_components.result_ui import Result_Gradio, update_result, table_headers, info_headers, js_update_tab_labels
 
-max_tabs = 10
-table_headers = ["TOTAL" , "FRAME_NUM", "DIR", "R", "THETA", "L", "TIME", "DATE", "SPECIES"]
-info_headers = [
-    ["TOTAL_TIME", "DATE", "START", "END", "TOTAL_FRAMES", "FRAME_RATE"],
-    ["TOTAL_FISH", "UPSTREAM_FISH", "DOWNSTREAM_FISH", "NONDIRECTIONAL_FISH"],
-    ["UPSTREAM_MOTION", "INTENSITY", "THRESHOLD", "WATER_TEMP"]
-]
-css = """
-<style>
-#result_json {
-    height: 500px;
-    overflow: scroll !important;
-}
-#marking_json thead {
-    display: none !important;
-}
-#canvas {
-    align-self: center;
-}
-</style>
-"""
-js_update_tabs = """
-async () => {
-    let el_list = document.getElementById("result_handler").getElementsByClassName("svelte-1kcgrqr")
-    let idx = (el_list[1].value === "LOADING") ? 1 : parseInt(el_list[1].value)
-    console.log(idx)
-    style_sheet = document.getElementById("tab_style")
-    style_sheet.innerHTML = ""
-    for (let i = 1; i <= idx; i++) {
-        style_sheet.innerHTML += "#result_tabs button.svelte-kqij2n:nth-child(" + i + "):before {content: 'Result " + i + "';}"
-    }
-}
-"""
 
 #Initialize State & Result
 state = {
@@ -58,10 +27,8 @@ state = {
 result = {}
 
 
-
-
-# Start function, called on file upload
-def on_input(file_list):
+# Called when an Aris file is uploaded for inference
+def on_aris_input(file_list):
 
     # Reset Result
     reset_state(result, state)
@@ -70,11 +37,73 @@ def on_input(file_list):
 
     # Update loading_space to start inference on first file
     return {
-        inference_handler: gr.update(value = str(np.random.rand()), visible=True)
+        inference_handler: gr.update(value = str(np.random.rand()), visible=True),
+        components['cancelBtn']: gr.update(visible=True),
+        components['skipBtn']: gr.update(visible=True),
+        master_tabs: gr.update(selected=1)
     }
 
+# Called when a result zip file is uploaded for result review
+def on_result_upload(zip_list, aris_list):
+
+    reset_state(result, state)
+
+    component_updates = {
+        master_tabs: gr.update(selected=1),
+        tab_labeler: gr.update(value = len(zip_list))
+    }
+
+    for i in range(len(zip_list)):
+
+        # Create dir to unzip files
+        dir_name = create_data_dir(str(i))
+
+        # Check aris input
+        if (aris_list):
+            aris_info = aris_list[i]
+            file_name = aris_info[0].split("/")[-1]
+            bytes = aris_info[1]
+            valid, input_path, dir_name = save_data_to_dir(bytes, file_name, dir_name)
+        else:
+            input_path = None
+
+        # Unzip result
+        zip_info = zip_list[i]
+        zip_name = zip_info[0]
+        print(zip_name)
+        with ZipFile(zip_name) as zip_file:
+            ZipFile.extractall(zip_file, path=dir_name)
+        unzipped = os.listdir(dir_name)
+        print(unzipped)
+
+        for file in unzipped:
+            if (file.endswith("_results.mp4")):
+                result["path_video"].append(os.path.join(dir_name, file))
+            elif (file.endswith("_results.json")):
+                result["path_json"].append(os.path.join(dir_name, file))
+            elif (file.endswith("_marking.txt")):
+                result["path_marking"].append(os.path.join(dir_name, file))
+
+        result["aris_input"].append(input_path)
+        with open(result['path_json'][-1]) as f:
+            json_result = json.load(f)
+        result['json_result'].append(json_result)
+        fish_table, fish_info = create_metadata_table(json_result, table_headers, info_headers)
+        result["fish_table"].append(fish_table)
+        result["fish_info"].append(fish_info)
+
+        print(result['aris_input'])
+        update = update_result(i, state, result, inference_handler)
+
+        for key in update.keys():
+            component_updates[key] = update[key]
+
+    component_updates.pop(inference_handler)
+    return component_updates
+
+
 # Iterative function that performs inference on the next file in line
-def 
+def infer_next(_, progress=gr.Progress()):
 
     if state['index'] >= state['total']:
         return {
@@ -126,33 +155,40 @@ def handle_next(_, progress=gr.Progress()):
     # Send of update to result_handler to show new result
     # Leave inference_handler update blank to avoid starting next inference until result is updated
     return {
-        result_handler: gr.update(value = str(
+        result_handler: gr.update(value = str(np.random.rand())),
+        tab_labeler: gr.update(value = str(state['index'])),
         inference_handler: gr.update()
     }
 
-# Show result
-def 
-
-    state["index"] = 1
-    return gr.update(value=str(state["index"]))
-
-def show_data():
-    # Get last index
+# Show result
+def on_result_ready():
+    # Update result tab for last file
     i = state["index"] - 1
+    return update_result(i, state, result, inference_handler)
 
-
-
-
-
-
+def cancel_inference():
+    return {
+        master_tabs: gr.update(selected=0),
+        inference_handler: gr.update(visible=False),
+        components['cancelBtn']: gr.update(visible=False),
+        components['skipBtn']: gr.update(visible=False)
+    }
 
-    # Check if inference is done
-    not_done = state['index'] < state['total']
 
+# Request loading of animation editor
+def prepare_annotation():
+    return {
+        annotation_progress: gr.update(value="<p align='center' style='font-size: large;font-style: italic;'>Loading annotation...</p><!--" + str(np.random.rand()) + "-->", visible=True),
+        master_tabs: gr.update(selected=2)
+    }
+
+# Load frames and annotation information and show
+def open_annotation(index):
+    print(index)
 
     annotation_html = ""
-    if result["aris_input"][
-        frame_info = load_frames(result["aris_input"][
+    if result["aris_input"][index]:
+        frame_info = load_frames(result["aris_input"][index], result['json_result'][index])
 
         annotation_html = "<div style='display:flex'>"
         annotation_html += "<canvas id='canvas' style='width:50%' onmousedown='mouse_down(event)' onmousemove='mouse_move(event)' onmouseup='mouse_up()' onmouseleave='mouse_up()'></canvas>"
@@ -160,64 +196,12 @@ def show_data():
         annotation_html += "</div>"
         annotation_html += "<p id='annotation_info' style='display:none'>" + json.dumps(frame_info) + "</p>"
         annotation_html += "<img id='annotation_img' onload='draw()' style='display:none'></img>"
+        annotation_html += "<!--" + str(np.random.rand()) + "-->"
 
-
-    # Send update to UI, and to inference_handler to start next file inference
-    return {
-        zip_out: gr.update(value=result["path_zip"]),
-        tabs[i]['tab']: gr.update(),
-        tabs[i]['video']: gr.update(value=result["path_video"][i], visible=True),
-        tabs[i]['metadata']: gr.update(value=result["fish_info"][i], visible=True),
-        tabs[i]['table']: gr.update(value=result["fish_table"][i], visible=True),
-        tabs[i]['annotation']: gr.update(value=annotation_html, visible=True),
-        tab_parent: gr.update(selected=i),
-        inference_handler: gr.update(value = str(np.random.rand()), visible=not_done)
-    }
-
-
-
-def preview_result(zip_info, aris_info):
-    zip_name = zip_info[0]
-    print(zip_name)
-    if (aris_info):
-        print(aris_info[0])
-        file_name = aris_info[0].split("/")[-1]
-        bytes = aris_info[1]
-        valid, file_path, dir_name = save_data(bytes, file_name)
-    else:
-        dir_name = create_data_dir()
-        file_path = None
-
-    with ZipFile(zip_name) as zip_file:
-        ZipFile.extractall(zip_file, path=dir_name)
-
-    unzipped = os.listdir(dir_name)
-    print(unzipped)
-
-    reset_state(result, state)
-    state["index"] = 1
-    for file in unzipped:
-        if (file.endswith("_results.mp4")):
-            result["path_video"].append(os.path.join(dir_name, file))
-        elif (file.endswith("_results.json")):
-            result["path_json"].append(os.path.join(dir_name, file))
-        elif (file.endswith("_marking.txt")):
-            result["path_marking"].append(os.path.join(dir_name, file))
-
-    result["aris_input"].append(file_path)
-    with open(result['path_json'][0]) as f:
-        json_result = json.load(f)
-    result['json_result'].append(json_result)
-    fish_table, fish_info = create_metadata_table(json_result, table_headers, info_headers)
-    result["fish_table"].append(fish_table)
-    result["fish_info"].append(fish_info)
+    return gr.update(value=annotation_html, visible=True), gr.update(visible=False)
 
-    return {
-        result_handler: gr.update(value = str(state["index"])),
-        inference_handler: gr.update()
-    }
-
 
+components = {}
 
 demo = gr.Blocks()
 with demo:
@@ -227,68 +211,54 @@ with demo:
     gr.HTML(
         """
         <h1 align="center" style="font-size:xxx-large">Caltech Fisheye</h1>
-
+        <style>
+        #marking_json thead {
+            display: none !important;
+        }
+        .selected.svelte-kqij2n {
+            background: linear-gradient(180deg, #66eecb47, white);
+        }
+        </style>
         <style id="tab_style"></style>
         """
     )
 
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    for i in range(max_tabs):
-        with gr.Tab(label="", id=i, elem_id="result_tab"+str(i)) as tab:
-            metadata_out = gr.Matrix(label="Info", interactive=False, headers=[""]*6, datatype="markdown", visible=False, elem_id="marking_json")
-            table_out = gr.Matrix(label='Indentified Fish', headers=table_headers, interactive=False, visible=False)
-            video_out = gr.Video(label='Annotated Video', interactive=False, visible=False)
-            annotation_editor = gr.HTML("""""", visible=False)
-
-            with open('annotation_editor.js', 'r') as f:
-                js = f.read()
-            annotation_editor.change(lambda x: gr.update(), None, annotation_editor, _js=js)
-            tabs.append({
-                'tab': tab,
-                'metadata': metadata_out,
-                'video': video_out,
-                'table': table_out,
-                'annotation': annotation_editor
-            })
-            UI_components.extend([tab, metadata_out, video_out, table_out, annotation_editor])
-
-    # Button to show example result
-    #gr.Button(value="Show Example Result").click(show_example_data, None, result_handler)
+    with gr.Tabs() as master_tabs:
+        components['master_tabs'] = master_tabs
+
+        # Master Tab for uploading aris or result files
+        with gr.Tab("Upload", id=0):
+
+            # Draw Gradio components related to the upload ui
+            Upload_Gradio(components)
+
+        # Master Tab for result visualization
+        with gr.Tab("Result", id=1):
+
+
+            # Define annotation progress bar for event listeres, but unrender since it will be displayed later on
+            annotation_progress = gr.HTML("", visible=False).unrender()
+            components['annotation_progress'] = annotation_progress
+
+            # Draw the gradio components related to visualzing result
+            vis_components = Result_Gradio(prepare_annotation, components)
+
+        # Master Tab for annotation editing
+        with gr.Tab("Annotation Editor", id=2):
+
+            # Draw the annotation loading bar here
+            annotation_progress.render()
+
+            # Add annotation editor component
+            annotation_editor = gr.HTML("", visible=False)
+
+            # Event listener for opening annotation
+            annotation_progress.change(open_annotation, annotation_progress, [annotation_editor, annotation_progress], _js="() => window.annotation_index")
+
+            # Event listener for running javascript defined in 'annotation_editor.js'
+            with open('annotation_editor.js', 'r') as f:
+                annotation_editor.change(lambda x: gr.update(), None, annotation_editor, _js=f.read())
 
     # Disclaimer at the bottom of page
     gr.HTML(
@@ -300,21 +270,38 @@ with demo:
         """
     )
 
+    # Extract important components for ease of code
+    input = components['input']
+    inference_handler = components['inference_handler']
+    result_handler = components['result_handler']
+    tab_labeler = components['tab_labeler']
+
+
+    inference_comps = [inference_handler, master_tabs, components['cancelBtn'], components['skipBtn']]
+
     # When a file is uploaded to the input, tell the inference_handler to start inference
-    input.upload(
+    input.upload(on_aris_input, input, inference_comps)
 
     # When inference handler updates, tell result_handler to show the new result
     # Also, add inference_handler as the output in order to have it display the progress
-    inference_handler.change(
+    inference_event = inference_handler.change(infer_next, None, [inference_handler, result_handler, tab_labeler])
 
     # Send UI changes based on the new results to the UI_components, and tell the inference_handler to start next inference
-    result_handler.change(
-
+    result_handler.change(on_result_ready, None, vis_components + [inference_handler])
+
+    # Cancel and skip buttons
+    components['cancelBtn'].click(cancel_inference, None, inference_comps, cancels=[inference_event])
+    components['skipBtn'].click(cancel_inference, None, inference_comps, cancels=[inference_event])
+
    # Button to load a previous result and view visualization
-    preview_result_btn.click(
+    components['preview_result_btn'].click(
+        on_result_upload,
+        [components['result_input'], components['result_aris_input']],
+        vis_components + [master_tabs, tab_labeler]
+    )
 
 demo.queue().launch()
 
-
+on_result_ready()
 
 
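The new cancel/skip buttons lean on Gradio's event cancellation: .change() returns an event reference, and passing it to cancels=[...] aborts the in-flight call when the button is clicked (cancellation relies on the queue, which the app already enables with demo.queue()). Below is a stripped-down sketch of that wiring with placeholder names, not the app's actual components.

import gradio as gr

def run_next_file(token):
    # stand-in for infer_next(): one inference step per trigger
    return token

def cancel():
    # hide the driver so no further steps fire
    return gr.update(visible=False)

with gr.Blocks() as demo:
    handler = gr.Text(visible=False)          # dummy element that drives the loop
    cancel_btn = gr.Button("Cancel Inference")

    inference_event = handler.change(run_next_file, handler, handler)
    cancel_btn.click(cancel, None, handler, cancels=[inference_event])

demo.queue().launch()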
aris.py
CHANGED
@@ -450,29 +450,14 @@ def create_metadata_table(result, table_headers, info_headers):
         table.append(row)
 
     # Create info table
-    stacked_info = []
-    max_col = 0
-    for column in info_headers:
-        column_res = []
-        for field in column:
-            if field in metadata:
-                column_res.append([field, metadata[field]])
-        stacked_info.append(column_res)
-        if len(column_res) > max_col:
-            max_col = len(column_res)
-
     info = []
-    for 
-
-
-
-
-
-
-        else:
-            row.append(" ")
-            row.append(" ")
-        info.append(row)
+    for field in info_headers:
+        field_name = "**" + field + "**"
+        if field in metadata:
+            info.append([field_name, str(metadata[field])])
+        else:
+            info.append([field_name, ""])
+
     return table, info
 
 def create_manual_marking(results, out_path=None):
@@ -513,7 +498,7 @@ File Total Frame# Dir R (m) Theta L(cm) dR(cm) L/dR Aspect Time
     for fish in metadata["FISH"]:
         entry = {}
         for field in fish.keys():
-            if 
+            if fish[field] == "nan":
                 entry[field] = math.nan
             else:
                 entry[field] = fish[field]
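The info table is now a flat two-column matrix rather than the old stacked multi-column layout, matching the headers=["Field", "Value"], datatype="markdown" component in gradio_components/result_ui.py. An illustrative (made-up) slice of the output for a file whose metadata lacks WATER_TEMP:

info = [
    ["**TOTAL_TIME**", "0:05:12"],
    ["**DATE**", "2022-07-14"],
    ["**TOTAL_FISH**", "9"],
    ["**WATER_TEMP**", ""],  # field not present in the metadata
]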
gradio_components/result_ui.py
ADDED
@@ -0,0 +1,117 @@
+import gradio as gr
+import numpy as np
+
+js_update_tab_labels = """
+async () => {
+    let el_list = document.getElementById("tab_labeler").getElementsByClassName("svelte-1kcgrqr")
+    let idx = (el_list[1].value === "LOADING") ? 1 : parseInt(el_list[1].value)
+    console.log(idx)
+    style_sheet = document.getElementById("tab_style")
+    style_sheet.innerHTML = ""
+    for (let i = 1; i <= idx; i++) {
+        style_sheet.innerHTML += "#result_tabs button.svelte-kqij2n:nth-child(" + i + "):before {content: 'Result " + i + "';}"
+    }
+}
+"""
+
+table_headers = ["TOTAL" , "FRAME_NUM", "DIR", "R", "THETA", "L", "TIME", "DATE", "SPECIES"]
+info_headers = [
+    "TOTAL_TIME", "DATE", "START", "END", "TOTAL_FRAMES", "FRAME_RATE",
+    "TOTAL_FISH", "UPSTREAM_FISH", "DOWNSTREAM_FISH", "NONDIRECTIONAL_FISH",
+    "UPSTREAM_MOTION", "INTENSITY", "THRESHOLD", "WATER_TEMP"
+]
+max_tabs = 10
+
+tabs = []
+tab_parent = None
+zip_out = None
+
+def update_result(i, state, result, inference_handler):
+    # If index is larger than max_tabs, only add file to zip list
+    if i >= max_tabs:
+        return {
+            zip_out: gr.update(value=result["path_zip"])
+        }
+
+    # Check if inference is done
+    not_done = state['index'] < state['total']
+
+    annotation_avaliable = not (result["aris_input"][i] == None)
+
+    # Send update to UI, and to inference_handler to start next file inference
+    return {
+        zip_out: gr.update(value=result["path_zip"]),
+        tabs[i]['tab']: gr.update(),
+        tabs[i]['video']: gr.update(value=result["path_video"][i], visible=True),
+        tabs[i]['metadata']: gr.update(value=result["fish_info"][i], visible=True),
+        tabs[i]['table']: gr.update(value=result["fish_table"][i], visible=True),
+        tabs[i]['annotation_btn']: gr.update(visible=annotation_avaliable),
+        tab_parent: gr.update(selected=i),
+        inference_handler: gr.update(value = str(np.random.rand()), visible=not_done)
+    }
+
+
+
+
+def Result_Gradio(prepare_annotation, components):
+    global tabs, tab_parent, zip_out
+
+    # Dummy element to call inference events, this also displays the inference progress
+    components['inference_handler'] = gr.Text(value=str(np.random.rand()), visible=False)
+
+    # Dummy element to call UI events
+    components['result_handler'] = gr.Text(value="LOADING", visible=False)
+
+    # Dummy element for updating tab titles
+    components['tab_labeler'] = gr.Text(value="", visible=False, elem_id="tab_labeler")
+    components['tab_labeler'].change(lambda x: x, None, None, _js=js_update_tab_labels)
+
+    with gr.Row():
+        components['cancelBtn'] = gr.Button("Cancel Inference", visible=False)
+        components['skipBtn'] = gr.Button("Skip this file", visible=False)
+
+    # List of all UI components that will recieve outputs from the result_handler
+    visual_components = []
+
+    # Zip file output
+    zip_out = gr.File(label="ZIP Output", interactive=False)
+    visual_components.append(zip_out)
+
+
+    # Create result tabs
+    tabs = []
+    with gr.Tabs(elem_id="result_tabs") as tab_parent:
+        visual_components.append(tab_parent)
+
+        # Create 'max_tab' tabs for showing result
+        for i in range(max_tabs):
+            with gr.Tab(label="", id=i, elem_id="result_tab"+str(i)) as tab:
+
+                with gr.Row():
+                    # List of clip info (date, time, number of fish, temperature, etc.)
+                    metadata_out = gr.Matrix(label="Info", interactive=False, headers=["Field", "Value"], datatype="markdown", visible=False, elem_id="marking_json")
+
+                    # Annotated video
+                    video_out = gr.Video(label='Annotated Video', interactive=False, visible=False)
+
+
+                # Table of found fish
+                table_out = gr.Matrix(label='Indentified Fish', headers=table_headers, interactive=False, visible=False)
+
+                # Button for opening result in annotation editor
+                annotation_btn = gr.Button("Edit Annotation", visible=False)
+                annotation_btn.click(prepare_annotation, None, [components['annotation_progress'], components['master_tabs']], _js="() => window.annotation_index=" + str(i))
+
+                # Add components to tab dict for easy access later on
+                tabs.append({
+                    'tab': tab,
+                    'metadata': metadata_out,
+                    'video': video_out,
+                    'table': table_out,
+                    'annotation_btn': annotation_btn
+                })
+
+                # Add all components to list of visualization outputs
+                visual_components.extend([tab, metadata_out, video_out, table_out, annotation_btn])
+
+    return visual_components
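update_result() relies on Gradio's dictionary-return convention (the same one app.py uses above): when an event function's outputs are given as a list of components, it may return a dict keyed by components, and only the listed components are updated. That is how a single handler can drive whichever of the ten result tabs is relevant. A minimal self-contained sketch of the pattern, with illustrative component names and a placeholder file path:

import gradio as gr

with gr.Blocks() as demo:
    show_btn = gr.Button("Show result")
    video = gr.Video(visible=False)
    table = gr.Matrix(visible=False)

    def show():
        # Only the video is mentioned, so the table is left untouched.
        return {video: gr.update(value="example_result.mp4", visible=True)}

    show_btn.click(show, None, [video, table])

demo.launch()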
gradio_components/upload_ui.py
ADDED
@@ -0,0 +1,30 @@
+import gradio as gr
+from file_reader import File
+
+
+def Upload_Gradio(gradio_components):
+    with gr.Tabs():
+
+        # Tab - uploading aris files for inference
+        with gr.Tab("Infer ARIS"):
+
+            gr.HTML("<p align='center' style='font-size: large;font-style: italic;'>Submit an .aris file to analyze result.</p>")
+
+            #Input field for aris submission
+            gradio_components['input'] = File(file_types=[".aris", ".ddf"], type="binary", label="ARIS Input", file_count="multiple")
+
+        # Tab - uploading old result files to review
+        with gr.Tab("Open Result"):
+            gr.HTML("""
+                <p align='center' style='font-size: large;font-style: italic;'>Submit an old zip file of results to visualize.</p>
+                <p align='center' style='font-size: large;font-style: italic;'>If you want to edit annotations, also submit an aris file.</p>
+            """)
+
+            # Input for .zip result file
+            gradio_components['result_input'] = File(file_types=[".zip"], type="binary", label="Upload result file", file_count="multiple")
+
+            # Optional input for aris file to help with annotation editing
+            gradio_components['result_aris_input'] = File(file_types=[".aris", ".ddf"], type="binary", label="Upload aris file (optional)", file_count="multiple")
+
+            # Button for initializing review
+            gradio_components['preview_result_btn'] = gr.Button("View Result")
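Upload_Gradio() only creates the input widgets and registers them in a shared components dict; all event wiring stays in app.py. A rough usage sketch (the File class comes from the repo's file_reader module):

import gradio as gr
from gradio_components.upload_ui import Upload_Gradio

# After the call, these keys exist for app.py to wire up:
# 'input', 'result_input', 'result_aris_input', 'preview_result_btn'
components = {}
with gr.Blocks() as demo:
    Upload_Gradio(components)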
temp.mp4
CHANGED
Binary files a/temp.mp4 and b/temp.mp4 differ
uploader.py
CHANGED
@@ -5,10 +5,15 @@ from datetime import datetime;
 USER_DATA_DIR = "user_data/"
 
 
-def save_data(bytes, filename):
+def save_data(bytes, filename, identifier=None):
     """Take a file and saved it to a new user_data folder"""
 
-    dirname = create_data_dir()
+    dirname = create_data_dir(identifier)
+
+    return save_data_to_dir(bytes, filename, dirname)
+
+def save_data_to_dir(bytes, filename, dirname):
+
     filepath = os.path.join(dirname, filename)
 
     assert bytes[0:3] == b'DDF'
@@ -32,9 +37,11 @@ def allowed_file(filename):
     return '.' in filename and \
            filename.rsplit('.', 1)[1].lower() in ['aris', 'ddf']
 
-def create_data_dir():
+def create_data_dir(identifier = None):
     """Create a (probably) unique directory for a task."""
     dirname = os.path.join(USER_DATA_DIR, str(int(datetime.now().timestamp())))
+    if identifier:
+        dirname = os.path.join(dirname, identifier)
     if os.path.exists(dirname):
         print("Warning,", dirname, "already exists.")
     os.makedirs(dirname, exist_ok=True)
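With the new identifier argument, each file in a batch gets its own sub-folder under the timestamped task directory, which is how on_result_upload() keeps several uploaded results apart (it calls create_data_dir(str(i))). Hypothetical usage with placeholder bytes and file name:

from uploader import create_data_dir, save_data_to_dir

aris_bytes = b"DDF" + b"\x00" * 64   # placeholder; real .ddf/.aris data starts with b'DDF'
dir_name = create_data_dir("0")      # e.g. user_data/<timestamp>/0/
valid, input_path, dir_name = save_data_to_dir(aris_bytes, "example.ddf", dir_name)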