# First install required dependencies
import subprocess
import sys
def install_package(package, version=None):
    """Install a single package (optionally pinned to a version) via pip."""
    package_spec = f"{package}=={version}" if version else package
    print(f"Installing {package_spec}...")
    try:
        subprocess.check_call([sys.executable, "-m", "pip", "install", "--no-cache-dir", package_spec])
    except subprocess.CalledProcessError as e:
        print(f"Failed to install {package_spec}: {e}")
        raise
# List of required packages with specific versions
required_packages = [
    ("opencv-python-headless", "4.7.0.72"),
    ("deepface", "0.0.79"),
    ("tensorflow", "2.10.0"),
    ("gradio", "3.50.2"),
    ("matplotlib", "3.7.1"),
    ("pandas", "2.0.3"),
    ("Pillow", "10.0.1"),
]
# Install all required packages
for pkg, ver in required_packages:
    try:
        install_package(pkg, ver)
    except Exception as e:
        print(f"Critical error installing {pkg}: {str(e)}")
        sys.exit(1)
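# Note: on Hugging Face Spaces the usual mechanism for pinned dependencies is
# a requirements.txt file; installing at runtime as above works, but it slows
# down every cold start.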
# Now import the rest of the modules
import gradio as gr
import cv2
import numpy as np
from deepface import DeepFace
import matplotlib.pyplot as plt
from PIL import Image
import tempfile
import os
import shutil
import pandas as pd
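
# Two small helpers factored out of the three tools below (these names are
# introduced here, not part of any library): DeepFace operates on file paths,
# so Gradio images must be written to disk first, and OpenCV loads images as
# BGR while matplotlib expects RGB.
def save_image(img, path):
    """Save a NumPy array or PIL image to disk."""
    if isinstance(img, np.ndarray):
        Image.fromarray(img).save(path)
    else:
        img.save(path)

def load_rgb(path):
    """Read an image with OpenCV and convert BGR to RGB for display."""
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)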
def verify_faces(img1, img2, threshold=0.6, model="VGG-Face"):
    """Check whether two images show the same person."""
    temp_dir = tempfile.mkdtemp()
    try:
        # Save images to disk, since DeepFace works on file paths
        img1_path = os.path.join(temp_dir, "img1.jpg")
        img2_path = os.path.join(temp_dir, "img2.jpg")
        save_image(img1, img1_path)
        save_image(img2, img2_path)
        # Verify faces
        result = DeepFace.verify(
            img1_path=img1_path,
            img2_path=img2_path,
            model_name=model,
            distance_metric="cosine"
        )
        # Create visualization
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        for i, path in enumerate([img1_path, img2_path]):
            ax[i].imshow(load_rgb(path))
            ax[i].axis('off')
            ax[i].set_title(f"Image {i+1}")
        verified = result['distance'] <= threshold
        plt.suptitle(f"{'✅ MATCH' if verified else '❌ NO MATCH'}\nDistance: {result['distance']:.4f}")
        return fig, result
    except Exception as e:
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
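# Note: the 0.6 default above is a generic cosine-distance cutoff; DeepFace
# also returns its own per-model tuned threshold in result['threshold'],
# which may be stricter or looser.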
def find_faces(query_img, db_path_text, db_files, threshold=0.6, model="VGG-Face"):
    """Search a database of images for faces matching the query image."""
    temp_dir = tempfile.mkdtemp()
    try:
        # Save query image
        query_path = os.path.join(temp_dir, "query.jpg")
        save_image(query_img, query_path)
        # Use an existing directory if a path was given; otherwise build a
        # temporary database from the uploaded files
        if db_path_text and os.path.isdir(db_path_text):
            db_path = db_path_text
        else:
            db_path = os.path.join(temp_dir, "db")
            os.makedirs(db_path, exist_ok=True)
            if db_files:
                for i, file in enumerate(db_files):
                    ext = os.path.splitext(file.name)[1]
                    shutil.copy(file.name, os.path.join(db_path, f"img_{i}{ext}"))
        # Find faces
        try:
            dfs = DeepFace.find(
                img_path=query_path,
                db_path=db_path,
                model_name=model,
                distance_metric="cosine",
                silent=True
            )
        except Exception as e:
            return None, {"error": f"Face detection failed: {str(e)}"}
        df = dfs[0] if isinstance(dfs, list) else dfs
        if df.empty:
            return None, {"error": "No matches found"}
        # The distance column name varies across deepface releases
        # (e.g. 'VGG-Face_cosine' in older versions, 'distance' in newer ones)
        dist_col = 'distance' if 'distance' in df.columns else f"{model}_cosine"
        df = df[df[dist_col] <= threshold].sort_values(dist_col)
        if df.empty:
            return None, {"error": "No matches below the similarity threshold"}
        # Create visualization: query image plus up to four closest matches
        num_matches = min(4, len(df))
        fig, axes = plt.subplots(1, num_matches + 1, figsize=(15, 5))
        axes[0].imshow(load_rgb(query_path))
        axes[0].set_title("Query")
        axes[0].axis('off')
        for i in range(num_matches):
            match_path = df.iloc[i]['identity']
            if not os.path.exists(match_path):
                continue
            try:
                axes[i+1].imshow(load_rgb(match_path))
                axes[i+1].set_title(f"Match {i+1}\n{df.iloc[i][dist_col]:.4f}")
                axes[i+1].axis('off')
            except Exception:
                continue
        return fig, df[['identity', dist_col]].to_dict('records')
    except Exception as e:
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
def analyze_face(img, actions=['age', 'gender', 'emotion']):
    """Estimate facial attributes (age, gender, emotion, ...) for one image."""
    temp_dir = tempfile.mkdtemp()
    try:
        # Save image
        img_path = os.path.join(temp_dir, "analyze.jpg")
        save_image(img, img_path)
        # Analyze face
        results = DeepFace.analyze(
            img_path=img_path,
            actions=actions,
            enforce_detection=False,
            detector_backend='opencv'
        )
        # DeepFace returns a dict for one face and a list for several
        results = results if isinstance(results, list) else [results]
        fig = plt.figure(figsize=(10, 5))
        # Show image
        plt.subplot(121)
        plt.imshow(load_rgb(img_path))
        plt.title("Input Image")
        plt.axis('off')
        # Show attributes: keep numeric values, flatten nested score dicts
        # (e.g. emotion probabilities), and skip non-numeric entries such as
        # dominant_emotion, which barh cannot plot
        plt.subplot(122)
        attributes = {}
        for res in results:
            for k, v in res.items():
                if k == 'region':
                    continue
                if isinstance(v, (int, float)):
                    attributes[k] = v
                elif isinstance(v, dict):
                    for sub_k, sub_v in v.items():
                        if isinstance(sub_v, (int, float)):
                            attributes[f"{k}: {sub_k}"] = sub_v
        plt.barh(list(attributes.keys()), list(attributes.values()))
        plt.title("Analysis Results")
        plt.tight_layout()
        return fig, results
    except Exception as e:
        return None, {"error": str(e)}
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)
# Gradio Interface
with gr.Blocks(title="Face Recognition Toolkit", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧑‍💻 Face Recognition Toolkit")
    with gr.Tabs():
        with gr.Tab("Verify Faces"):
            with gr.Row():
                img1 = gr.Image(label="First Image", type="pil")
                img2 = gr.Image(label="Second Image", type="pil")
            verify_threshold = gr.Slider(0.1, 1.0, 0.6, label="Match Threshold")
            verify_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face", label="Model")
            verify_btn = gr.Button("Verify Faces")
            verify_output = gr.Plot()
            verify_json = gr.JSON()
            verify_btn.click(
                verify_faces,
                [img1, img2, verify_threshold, verify_model],
                [verify_output, verify_json]
            )
        with gr.Tab("Find Faces"):
            query_img = gr.Image(label="Query Image", type="pil")
            db_input = gr.Textbox("", label="Database Path (optional)")
            db_files = gr.File(file_count="multiple", label="Upload Database Images")
            find_threshold = gr.Slider(0.1, 1.0, 0.6, label="Similarity Threshold")
            find_model = gr.Dropdown(["VGG-Face", "Facenet", "OpenFace"], value="VGG-Face", label="Model")
            find_btn = gr.Button("Find Matches")
            find_output = gr.Plot()
            find_json = gr.JSON()
            # Both the optional directory path and the uploaded files are
            # passed through; find_faces decides which one to use
            find_btn.click(
                find_faces,
                [query_img, db_input, db_files, find_threshold, find_model],
                [find_output, find_json]
            )
        with gr.Tab("Analyze Face"):
            analyze_img = gr.Image(label="Input Image", type="pil")
            analyze_actions = gr.CheckboxGroup(
                ["age", "gender", "emotion", "race"],
                value=["age", "gender", "emotion"],
                label="Analysis Features"
            )
            analyze_btn = gr.Button("Analyze")
            analyze_output = gr.Plot()
            analyze_json = gr.JSON()
            analyze_btn.click(
                analyze_face,
                [analyze_img, analyze_actions],
                [analyze_output, analyze_json]
            )

demo.launch()
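
# Minimal programmatic usage sketch, bypassing the UI (the image paths here
# are hypothetical placeholders):
#
#   from PIL import Image
#   fig, result = verify_faces(Image.open("person_a.jpg"), Image.open("person_b.jpg"))
#   print(result)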