Natwar committed on
Commit 24f204c · verified · 1 Parent(s): 76c6b8a

Update app.py

Files changed (1)
  1. app.py +97 -302
app.py CHANGED
@@ -1,4 +1,4 @@
- # Install required packages
  import os
  import subprocess
  import sys
@@ -7,7 +7,6 @@ import pkg_resources
 
  def install_package(package, version=None):
      package_spec = f"{package}=={version}" if version else package
-     print(f"Installing {package_spec}...")
      try:
          subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec])
      except subprocess.CalledProcessError as e:
@@ -20,28 +19,18 @@ def ensure_package(package, version=None):
              pkg_resources.require(f"{package}=={version}")
          else:
              importlib.import_module(package)
-         print(f"{package} is already installed with the correct version.")
-     except (ImportError, pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:
-         print(f"Package requirement failed: {e}")
          install_package(package, version)
 
- # Check if running in a standard environment (not Colab/Jupyter)
- if not os.path.exists("/.dockerenv") and not os.path.exists("/kaggle"):
-     print("Setting up environment...")
- 
-     # Install packages in the correct order with compatible versions
-     ensure_package("numpy", "1.23.5")  # Compatible with TensorFlow 2.10
-     ensure_package("protobuf", "3.20.3")  # Critical for TensorFlow compatibility
-     ensure_package("tensorflow", "2.10.0")  # Stable version with good compatibility
- 
-     # Install core dependencies
-     for pkg in ["gradio", "opencv-python-headless", "matplotlib", "pillow", "pandas"]:
-         ensure_package(pkg)
- 
-     # Install deepface last after all dependencies are set up
-     ensure_package("deepface")
- 
- # Now import the required modules
  import gradio as gr
  import json
  import cv2
@@ -51,125 +40,71 @@ import tempfile
  import pandas as pd
  import shutil
  import matplotlib.pyplot as plt
- 
- # Import DeepFace after ensuring dependencies are properly installed
  from deepface import DeepFace
 
  def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
      temp_dir = tempfile.mkdtemp()
      img1_path = os.path.join(temp_dir, "image1.jpg")
      img2_path = os.path.join(temp_dir, "image2.jpg")
 
-     if isinstance(img1, np.ndarray):
-         Image.fromarray(img1).save(img1_path)
-     else:
-         img1.save(img1_path)
- 
-     if isinstance(img2, np.ndarray):
-         Image.fromarray(img2).save(img2_path)
-     else:
-         img2.save(img2_path)
 
      try:
-         result = DeepFace.verify(
-             img1_path=img1_path,
-             img2_path=img2_path,
-             model_name=model,
-             distance_metric="cosine",
-             threshold=threshold
-         )
- 
          fig, ax = plt.subplots(1, 2, figsize=(10, 5))
- 
-         img1_display = cv2.imread(img1_path)
-         img1_display = cv2.cvtColor(img1_display, cv2.COLOR_BGR2RGB)
-         img2_display = cv2.imread(img2_path)
-         img2_display = cv2.cvtColor(img2_display, cv2.COLOR_BGR2RGB)
- 
-         ax[0].imshow(img1_display)
-         ax[0].set_title("Image 1")
-         ax[0].axis("off")
- 
-         ax[1].imshow(img2_display)
-         ax[1].set_title("Image 2")
-         ax[1].axis("off")
- 
-         verification_result = "✅ FACE MATCHED" if result["verified"] else "❌ FACE NOT MATCHED"
-         confidence = round((1 - result["distance"]) * 100, 2)
- 
-         plt.suptitle(f"{verification_result}\nConfidence: {confidence}%\nDistance: {result['distance']:.4f}",
-                      fontsize=16, fontweight='bold',
-                      color='green' if result["verified"] else 'red')
- 
          plt.tight_layout()
 
-         os.remove(img1_path)
-         os.remove(img2_path)
-         os.rmdir(temp_dir)
- 
          return fig, json.dumps(result, indent=2)
- 
      except Exception as e:
-         if os.path.exists(img1_path):
-             os.remove(img1_path)
-         if os.path.exists(img2_path):
-             os.remove(img2_path)
-         if os.path.exists(temp_dir):
-             os.rmdir(temp_dir)
- 
-         error_msg = f"Error: {str(e)}"
-         if "No face detected" in str(e):
-             error_msg = "No face detected in one or both images. Please try different images."
- 
-         return None, error_msg
 
- def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
      temp_dir = tempfile.mkdtemp()
      query_path = os.path.join(temp_dir, "query.jpg")
- 
-     if isinstance(query_img, np.ndarray):
-         Image.fromarray(query_img).save(query_path)
-     else:
-         query_img.save(query_path)
- 
-     if isinstance(db_folder, str):
-         db_path = db_folder
-     else:
-         db_path = os.path.join(temp_dir, "db")
-         os.makedirs(db_path, exist_ok=True)
- 
-         for i, file in enumerate(db_folder):
-             file_ext = os.path.splitext(file.name)[1]
-             shutil.copy(file.name, os.path.join(db_path, f"image_{i}{file_ext}"))
 
      try:
          dfs = DeepFace.find(
              img_path=query_path,
-             db_path=db_path,
              model_name=model,
              distance_metric="cosine",
              threshold=threshold
          )
 
          if isinstance(dfs, list):
-             if len(dfs) == 0:
-                 return None, "No matching faces found in the database."
-             df = dfs[0]
          else:
              df = dfs
 
          if df.empty:
-             return None, "No matching faces found in the database."
 
          df = df.sort_values(by=["distance"])
- 
          num_matches = min(4, len(df))
          fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
 
-         query_display = cv2.imread(query_path)
-         query_display = cv2.cvtColor(query_display, cv2.COLOR_BGR2RGB)
          axes[0].imshow(query_display)
-         axes[0].set_title("Query Image")
          axes[0].axis("off")
 
          for i in range(num_matches):
@@ -177,235 +112,95 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
              distance = df.iloc[i]["distance"]
              confidence = round((1 - distance) * 100, 2)
 
-             match_img = cv2.imread(match_path)
-             match_img = cv2.cvtColor(match_img, cv2.COLOR_BGR2RGB)
- 
              axes[i+1].imshow(match_img)
              axes[i+1].set_title(f"Match #{i+1}\nConfidence: {confidence}%")
              axes[i+1].axis("off")
 
-         plt.suptitle(f"Found {len(df)} matching faces", fontsize=16, fontweight='bold')
          plt.tight_layout()
 
-         results = df[["identity", "distance"]].copy()
-         results["confidence"] = (1 - results["distance"]) * 100
-         results["confidence"] = results["confidence"].round(2)
-         results = results.rename(columns={"identity": "Image Path"})
- 
-         os.remove(query_path)
-         if not isinstance(db_folder, str):
-             shutil.rmtree(db_path)
- 
-         return fig, results.to_dict('records')
 
      except Exception as e:
-         if os.path.exists(query_path):
-             os.remove(query_path)
- 
-         error_msg = f"Error: {str(e)}"
-         if "No face detected" in str(e):
-             error_msg = "No face detected in the query image. Please try a different image."
- 
-         return None, error_msg
 
  def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
      temp_dir = tempfile.mkdtemp()
      img_path = os.path.join(temp_dir, "analyze.jpg")
- 
-     if isinstance(img, np.ndarray):
-         Image.fromarray(img).save(img_path)
-     else:
-         img.save(img_path)
 
      try:
-         results = DeepFace.analyze(
-             img_path=img_path,
-             actions=actions,
-             enforce_detection=True,
-             detector_backend='opencv'
-         )
- 
-         if isinstance(results, list):
-             num_faces = len(results)
-         else:
-             num_faces = 1
-             results = [results]
 
          fig = plt.figure(figsize=(14, 7))
- 
-         img_display = cv2.imread(img_path)
-         img_display = cv2.cvtColor(img_display, cv2.COLOR_BGR2RGB)
- 
          main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
-         main_ax.imshow(img_display)
-         main_ax.set_title(f"Analyzed Image ({num_faces} face{'s' if num_faces > 1 else ''} detected)")
-         main_ax.axis('off')
- 
-         for i, face_result in enumerate(results):
-             if i >= 4:
-                 break
- 
-             age = face_result.get('age', 'N/A')
-             gender = face_result.get('dominant_gender', 'N/A')
-             race = face_result.get('dominant_race', 'N/A')
-             emotion = face_result.get('dominant_emotion', 'N/A')
- 
-             gender_conf = 'N/A'
-             if 'gender' in face_result and isinstance(face_result['gender'], dict):
-                 for g, conf in face_result['gender'].items():
-                     if g.lower() == gender.lower():
-                         gender_conf = f"{conf:.1f}%"
-                         break
- 
-             race_conf = 'N/A'
-             if 'race' in face_result and isinstance(face_result['race'], dict):
-                 for r, conf in face_result['race'].items():
-                     if r.lower() == race.lower():
-                         race_conf = f"{conf:.1f}%"
-                         break
- 
-             emotion_conf = 'N/A'
-             if 'emotion' in face_result and isinstance(face_result['emotion'], dict):
-                 for e, conf in face_result['emotion'].items():
-                     if e.lower() == emotion.lower():
-                         emotion_conf = f"{conf:.1f}%"
-                         break
- 
-             ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + (i % 2)))
- 
-             text = (
-                 f"Face #{i+1}\n\n"
-                 f"Age: {age}\n\n"
-                 f"Gender: {gender} ({gender_conf})\n\n"
-                 f"Race: {race} ({race_conf})\n\n"
-                 f"Emotion: {emotion} ({emotion_conf})"
-             )
- 
              ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
-             ax.axis('off')
 
          plt.tight_layout()
 
-         os.remove(img_path)
-         os.rmdir(temp_dir)
- 
-         formatted_results = []
-         for i, res in enumerate(results[:8]):
-             face_data = {
-                 "face_number": i+1,
-                 "age": res.get("age", "N/A"),
-                 "gender": {
-                     "dominant": res.get("dominant_gender", "N/A"),
-                     "confidence": res.get("gender", {})
-                 },
-                 "race": {
-                     "dominant": res.get("dominant_race", "N/A"),
-                     "confidence": res.get("race", {})
-                 },
-                 "emotion": {
-                     "dominant": res.get("dominant_emotion", "N/A"),
-                     "confidence": res.get("emotion", {})
-                 }
-             }
-             formatted_results.append(face_data)
- 
-         return fig, formatted_results
- 
      except Exception as e:
-         if os.path.exists(img_path):
-             os.remove(img_path)
-         if os.path.exists(temp_dir):
-             os.rmdir(temp_dir)
 
-         error_msg = f"Error: {str(e)}"
-         if "No face detected" in str(e):
-             error_msg = "No face detected in the image. Please try a different image."
- 
-         return None, error_msg
- 
- with gr.Blocks(title="Complete Face Recognition Tool", theme=gr.themes.Soft()) as demo:
      gr.Markdown("""
-     # 🔍 Complete Face Recognition Tool
-     This tool provides three face recognition features:
-     - **Verify Faces**: Compare two specific images to check if they contain the same person
-     - **Find Faces**: Search for matching faces in a database/folder
-     - **Analyze Face**: Determine age, gender, race, and emotion from a facial image
      """)
 
      with gr.Tabs():
          with gr.TabItem("Verify Faces"):
              with gr.Row():
-                 img1_input = gr.Image(label="First Image", type="pil")
-                 img2_input = gr.Image(label="Second Image", type="pil")
- 
-             with gr.Row():
-                 verify_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
-                                              label="Similarity Threshold (lower = stricter matching)")
-                 verify_model = gr.Dropdown(
-                     choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
-                     value="VGG-Face",
-                     label="Face Recognition Model"
-                 )
- 
-             verify_button = gr.Button("Verify Faces", variant="primary")
- 
-             verify_result_plot = gr.Plot(label="Verification Result")
-             verify_json = gr.JSON(label="Technical Details")
- 
-             verify_button.click(
-                 verify_faces,
-                 inputs=[img1_input, img2_input, verify_threshold, verify_model],
-                 outputs=[verify_result_plot, verify_json]
-             )
 
          with gr.TabItem("Find Faces"):
-             query_img = gr.Image(label="Query Image (Face to find)", type="pil")
-             db_path_input = gr.Textbox(label="Database Path (folder containing images to search in)")
-             db_files_input = gr.File(label="Or upload images for database", file_count="multiple")
- 
              with gr.Row():
-                 find_threshold = gr.Slider(minimum=0.1, maximum=0.9, value=0.6, step=0.05,
-                                            label="Similarity Threshold (lower = stricter matching)")
-                 find_model = gr.Dropdown(
-                     choices=["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"],
-                     value="VGG-Face",
-                     label="Face Recognition Model"
-                 )
- 
-             find_button = gr.Button("Find Matching Faces", variant="primary")
- 
-             find_result_plot = gr.Plot(label="Search Results")
-             find_results_table = gr.JSON(label="Detailed Results")
- 
-             find_button.click(
-                 find_faces,
-                 inputs=[query_img, db_path_input, find_threshold, find_model],
-                 outputs=[find_result_plot, find_results_table]
-             )
- 
-             db_files_input.change(
-                 lambda x: "",
-                 inputs=db_files_input,
-                 outputs=db_path_input
-             )
 
          with gr.TabItem("Analyze Face"):
-             analyze_img = gr.Image(label="Upload Image for Analysis", type="pil")
-             actions_checkboxes = gr.CheckboxGroup(
-                 choices=["age", "gender", "race", "emotion"],
-                 value=["age", "gender", "race", "emotion"],
-                 label="Select Attributes to Analyze"
-             )
- 
-             analyze_button = gr.Button("Analyze Face", variant="primary")
- 
-             analyze_result_plot = gr.Plot(label="Analysis Results")
-             analyze_json = gr.JSON(label="Detailed Analysis")
- 
-             analyze_button.click(
-                 analyze_face,
-                 inputs=[analyze_img, actions_checkboxes],
-                 outputs=[analyze_result_plot, analyze_json]
-             )
- 
- demo.launch()
 
@@ -1,4 +1,4 @@
+ # Required package setup
  import os
  import subprocess
  import sys
@@ -7,7 +7,6 @@ import pkg_resources
 
  def install_package(package, version=None):
      package_spec = f"{package}=={version}" if version else package
      try:
          subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec])
      except subprocess.CalledProcessError as e:
@@ -20,28 +19,18 @@ def ensure_package(package, version=None):
              pkg_resources.require(f"{package}=={version}")
          else:
              importlib.import_module(package)
+     except (ImportError, pkg_resources.VersionConflict, pkg_resources.DistributionNotFound):
          install_package(package, version)
 
+ # Install packages
+ ensure_package("numpy", "1.23.5")
+ ensure_package("protobuf", "3.20.3")
+ ensure_package("tensorflow", "2.10.0")
+ for pkg in ["gradio", "opencv-python-headless", "matplotlib", "pillow", "pandas"]:
+     ensure_package(pkg)
+ ensure_package("deepface")
+ 
+ # Imports
  import gradio as gr
  import json
  import cv2
@@ -51,125 +40,71 @@ import tempfile
  import pandas as pd
  import shutil
  import matplotlib.pyplot as plt
  from deepface import DeepFace
 
+ # --- VERIFY FACES ---
  def verify_faces(img1, img2, threshold=0.70, model="VGG-Face"):
      temp_dir = tempfile.mkdtemp()
      img1_path = os.path.join(temp_dir, "image1.jpg")
      img2_path = os.path.join(temp_dir, "image2.jpg")
 
+     Image.fromarray(img1).save(img1_path)
+     Image.fromarray(img2).save(img2_path)
 
      try:
+         result = DeepFace.verify(img1_path, img2_path, model_name=model, distance_metric="cosine", threshold=threshold)
          fig, ax = plt.subplots(1, 2, figsize=(10, 5))
+         ax[0].imshow(cv2.cvtColor(cv2.imread(img1_path), cv2.COLOR_BGR2RGB))
+         ax[0].set_title("Image 1"); ax[0].axis("off")
+         ax[1].imshow(cv2.cvtColor(cv2.imread(img2_path), cv2.COLOR_BGR2RGB))
+         ax[1].set_title("Image 2"); ax[1].axis("off")
+ 
+         verified = result["verified"]
+         conf = round((1 - result["distance"]) * 100, 2)
+         plt.suptitle(f"{'✅ MATCHED' if verified else '❌ NOT MATCHED'}\nConfidence: {conf}%",
+                      fontsize=16, fontweight='bold',
+                      color='green' if verified else 'red')
          plt.tight_layout()
 
+         shutil.rmtree(temp_dir)
          return fig, json.dumps(result, indent=2)
      except Exception as e:
+         shutil.rmtree(temp_dir)
+         return None, str(e)
 
+ # --- FIND FACES ---
+ def find_faces(query_img, db_folder_path, threshold=0.70, model="VGG-Face"):
      temp_dir = tempfile.mkdtemp()
      query_path = os.path.join(temp_dir, "query.jpg")
+     Image.fromarray(query_img).save(query_path)
 
      try:
+         if not os.path.isdir(db_folder_path):
+             return None, "Invalid database folder path. Please provide a valid local path."
+ 
          dfs = DeepFace.find(
              img_path=query_path,
+             db_path=db_folder_path,
              model_name=model,
              distance_metric="cosine",
              threshold=threshold
          )
 
          if isinstance(dfs, list):
+             df = dfs[0] if dfs else pd.DataFrame()
          else:
              df = dfs
 
          if df.empty:
+             return None, "No matching faces found."
 
          df = df.sort_values(by=["distance"])
          num_matches = min(4, len(df))
         fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
 
+         query_display = cv2.cvtColor(cv2.imread(query_path), cv2.COLOR_BGR2RGB)
          axes[0].imshow(query_display)
+         axes[0].set_title("Query")
          axes[0].axis("off")
 
          for i in range(num_matches):
@@ -177,235 +112,95 @@ def find_faces(query_img, db_folder, threshold=0.70, model="VGG-Face"):
              distance = df.iloc[i]["distance"]
              confidence = round((1 - distance) * 100, 2)
 
+             match_img = cv2.cvtColor(cv2.imread(match_path), cv2.COLOR_BGR2RGB)
              axes[i+1].imshow(match_img)
              axes[i+1].set_title(f"Match #{i+1}\nConfidence: {confidence}%")
              axes[i+1].axis("off")
 
          plt.tight_layout()
 
+         df["confidence"] = ((1 - df["distance"]) * 100).round(2)
+         results = df[["identity", "distance", "confidence"]].to_dict("records")
 
+         shutil.rmtree(temp_dir)
+         return fig, results
      except Exception as e:
+         shutil.rmtree(temp_dir)
+         return None, str(e)
 
+ # --- ANALYZE FACE ---
  def analyze_face(img, actions=['age', 'gender', 'race', 'emotion']):
      temp_dir = tempfile.mkdtemp()
      img_path = os.path.join(temp_dir, "analyze.jpg")
+     Image.fromarray(img).save(img_path)
 
      try:
+         results = DeepFace.analyze(img_path=img_path, actions=actions, enforce_detection=True)
+         results = results if isinstance(results, list) else [results]
 
          fig = plt.figure(figsize=(14, 7))
          main_ax = plt.subplot2grid((2, 4), (0, 0), colspan=2, rowspan=2)
+         main_ax.imshow(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))
+         main_ax.set_title(f"Detected {len(results)} Face(s)")
+         main_ax.axis("off")
+ 
+         for i, res in enumerate(results[:4]):
+             age = res.get("age", "N/A")
+             gender = res.get("dominant_gender", "N/A")
+             race = res.get("dominant_race", "N/A")
+             emotion = res.get("dominant_emotion", "N/A")
+ 
+             text = f"Face #{i+1}\n\nAge: {age}\nGender: {gender}\nRace: {race}\nEmotion: {emotion}"
+             ax = plt.subplot2grid((2, 4), (0 if i < 2 else 1, 2 + i % 2))
              ax.text(0.5, 0.5, text, ha='center', va='center', fontsize=11)
+             ax.axis("off")
 
          plt.tight_layout()
+         shutil.rmtree(temp_dir)
 
+         return fig, results
      except Exception as e:
+         shutil.rmtree(temp_dir)
+         return None, str(e)
 
+ # --- GRADIO INTERFACE ---
+ with gr.Blocks(title="Face Recognition Tool", theme=gr.themes.Soft()) as demo:
      gr.Markdown("""
+     # 👤 Face Recognition Tool
+     - **Verify Faces**: Are two images of the same person?
+     - **Find Faces**: Search for matches from a folder.
+     - **Analyze Face**: Detect age, gender, race, emotion.
      """)
 
      with gr.Tabs():
          with gr.TabItem("Verify Faces"):
              with gr.Row():
+                 img1 = gr.Image(label="Image 1", type="numpy")
+                 img2 = gr.Image(label="Image 2", type="numpy")
+             threshold = gr.Slider(0.1, 0.9, value=0.7, step=0.05, label="Threshold")
+             model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"], value="VGG-Face", label="Model")
+             btn = gr.Button("Verify")
+             out_plot = gr.Plot()
+             out_json = gr.JSON()
+             btn.click(verify_faces, inputs=[img1, img2, threshold, model], outputs=[out_plot, out_json])
 
          with gr.TabItem("Find Faces"):
              with gr.Row():
+                 query_img = gr.Image(label="Query Face", type="numpy")
+                 db_path = gr.Textbox(label="Folder Path for Face Database")
+             threshold_find = gr.Slider(0.1, 0.9, value=0.7, step=0.05, label="Threshold")
+             model_find = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace", "DeepFace", "ArcFace"], value="VGG-Face", label="Model")
+             btn_find = gr.Button("Find Matches")
+             out_plot_find = gr.Plot()
+             out_json_find = gr.JSON()
+             btn_find.click(find_faces, inputs=[query_img, db_path, threshold_find, model_find], outputs=[out_plot_find, out_json_find])
 
          with gr.TabItem("Analyze Face"):
+             img = gr.Image(label="Upload Image", type="numpy")
+             actions = gr.CheckboxGroup(["age", "gender", "race", "emotion"], value=["age", "gender", "race", "emotion"], label="Attributes")
+             btn_analyze = gr.Button("Analyze")
+             out_plot_analyze = gr.Plot()
+             out_json_analyze = gr.JSON()
+             btn_analyze.click(analyze_face, inputs=[img, actions], outputs=[out_plot_analyze, out_json_analyze])
+ 
+ demo.launch()
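
For reviewers who want to sanity-check the DeepFace calls the updated app.py relies on, here is a minimal standalone sketch run outside Gradio. It is illustrative only: the image file names are hypothetical, and it assumes deepface and its dependencies are installed as in the setup block above.

# Minimal sketch of the DeepFace calls used by the updated app.py (outside Gradio).
# "person_a.jpg" and "person_b.jpg" are hypothetical local sample images.
from deepface import DeepFace

result = DeepFace.verify(
    img1_path="person_a.jpg",       # hypothetical sample image
    img2_path="person_b.jpg",       # hypothetical sample image
    model_name="VGG-Face",
    distance_metric="cosine",
)
print(result["verified"], result["distance"])   # boolean match flag and cosine distance

analysis = DeepFace.analyze(
    img_path="person_a.jpg",
    actions=["age", "gender", "race", "emotion"],
    enforce_detection=True,
)
print(analysis)   # typically a list with one dict per detected face (older versions may return a single dict)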