import gradio as gr
import os
import glob
import json
import re
from pathlib import Path

def group_images_by_index(image_paths, is_audio=False):
    # Regular expression pattern to extract the example index from each file path
    if is_audio:
        pattern = r"audio_(\d+)\.png"
    else:
        pattern = r"img_(\d+)\.png"
    # Dictionary to store the grouped images
    grouped_images = {}
    # Iterate over each image path
    for image_path in image_paths:
        # Extract the index using the regular expression pattern
        match = re.search(pattern, image_path)
        if match:
            key = int(match.group(1))
            # Add the image path to the corresponding group in the dictionary
            if key not in grouped_images:
                grouped_images[key] = []
            grouped_images[key].append(image_path)
    # Sort the dictionary by keys
    sorted_grouped_images = dict(sorted(grouped_images.items()))
    return sorted_grouped_images

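# Illustrative example with hypothetical file names: given
#   ["x/wm_img_0.png", "x/nowm_img_0.png", "x/wm_img_1.png"]
# group_images_by_index returns
#   {0: ["x/wm_img_0.png", "x/nowm_img_0.png"], 1: ["x/wm_img_1.png"]},
# i.e. paths grouped by the integer index in the file name, sorted by that index.
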
def build_image_description(i, data_none, data_attack):
    if i == 0:
        fake_det_score = float(data_none["fake_det_score"])
        return f"det_score: {fake_det_score:.2f}"
    elif i == 1:
        psnr = float(data_none["psnr"])
        ssim = float(data_none["ssim"])
        lpips = float(data_none["lpips"])
        det_score = float(data_none["watermark_det_score"])
        p_value = data_none["p_value"]
        bit_acc = data_none["bit_acc"]
        return f"psnr: {psnr:.2f} ssim: {ssim:.2f} lpips: {lpips:.2f} det_score: {det_score:.2f} p_value: {p_value} bit_acc: {bit_acc}"
    elif i == 2:
        fake_det_score = float(data_attack["fake_det_score"])
        return f"det_score: {fake_det_score:.2f}"
    elif i == 3:
        det_score = float(data_attack["watermark_det_score"])
        p_value = data_attack["p_value"]
        word_acc = data_attack["word_acc"]
        bit_acc = data_attack["bit_acc"]
        return f"word_acc: {word_acc:.2f} det_score: {det_score:.2f} p_value: {p_value} bit_acc: {bit_acc}"

def build_audio_description(i, data_none, data_attack):
    if i == 0:
        tn_detect_prob = float(data_none["tn_detect_prob"])
        return f"det_score: {tn_detect_prob:.2f}"
    elif i == 1:
        snr = float(data_none["snr"])
        sisnr = float(data_none["sisnr"])
        stoi = float(data_none["stoi"])
        pesq = float(data_none["pesq"])
        det_score = float(data_none["detect_prob"])
        bit_acc = data_none["ba"]
        return f"snr: {snr:.2f} sisnr: {sisnr:.2f} stoi: {stoi:.2f} pesq: {pesq:.2f} det_score: {det_score:.2f} bit_acc: {bit_acc}"
    elif i == 2:
        tn_detect_prob = float(data_attack["tn_detect_prob"])
        return f"det_score: {tn_detect_prob:.2f}"
    elif i == 3:
        det_score = float(data_attack["detect_prob"])
        bit_acc = data_attack["ba"]
        return f"det_score: {det_score:.2f} bit_acc: {bit_acc}"

def build_image_infos(abs_path: Path):
    with (abs_path / "data/image_eval_results.json").open("r") as f:
        image_data = json.loads(f.read())
    examples_dir = Path("./examples/image")
    image_infos = {}
    for model_name in os.listdir(examples_dir):
        model_attacks_dir = examples_dir / model_name
        # Collect the attack galleries for this model
        model_image_infos = {}
        for attack_name in os.listdir(model_attacks_dir):
            attack_dir = model_attacks_dir / attack_name
            image_paths = glob.glob(f"{attack_dir}/*.png")
            all_files = []
            for i, files in group_images_by_index(image_paths).items():
                # Image metrics are stored under the "wam" entry of the eval results
                data_none = image_data["eval"]["val2014"]["wam"]["none"][i]
                data_attack = image_data["eval"]["val2014"]["wam"][attack_name][i]
                # Pair each path with its stem, sort by stem, then rotate so the
                # tiles appear in the intended display order
                files = sorted([(f, Path(f).stem) for f in files], key=lambda x: x[1])
                files = files[2:] + files[:2]
                files = [
                    (f, f"{n}\n{build_image_description(i, data_none, data_attack)}")
                    for i, (f, n) in enumerate(files)
                ]
                all_files.extend(files)
            model_image_infos[attack_name] = all_files
        image_infos[model_name] = model_image_infos
    return image_infos

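# build_image_infos returns image_infos[model_name][attack_name] = [(path, caption), ...];
# this list of (image, caption) tuples is the format gr.Gallery accepts in examples_tab below.
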
def build_audio_infos(abs_path: Path):
    with (abs_path / "data/audio_eval_results.json").open("r") as f:
        audio_data = json.loads(f.read())
    examples_dir = Path("./examples/audio")
    audio_infos = {}
    for model_name in os.listdir(examples_dir):
        model_attacks_dir = examples_dir / model_name
        # Collect the attack galleries for this model
        model_audio_infos = {}
        for attack_name in os.listdir(model_attacks_dir):
            attack_dir = model_attacks_dir / attack_name
            image_paths = glob.glob(f"{attack_dir}/*.png")
            all_files = []
            for i, files in group_images_by_index(image_paths, is_audio=True).items():
                data_none = audio_data["eval"]["ravdess"][model_name]["identity"][i]
                data_attack = audio_data["eval"]["ravdess"][model_name][attack_name][i]
                # Pair each path with its stem, sort by stem, then rotate so the
                # tiles appear in the intended display order
                files = sorted([(f, Path(f).stem) for f in files], key=lambda x: x[1])
                files = files[2:] + files[:2]
                files = [
                    (f, f"{n}\n{build_audio_description(i, data_none, data_attack)}")
                    for i, (f, n) in enumerate(files)
                ]
                all_files.extend(files)
            model_audio_infos[attack_name] = all_files
        audio_infos[model_name] = model_audio_infos
    return audio_infos

def examples_tab(abs_path: Path):
    image_infos = build_image_infos(abs_path)
    # First combo box (model selection)
    model_choice = gr.Dropdown(
        choices=list(image_infos.keys()),
        label="Select a Model",
        value=None,
    )
    # Second combo box (attack selection),
    # initialized with the attacks available for the "wam" model by default
    attack_choice = gr.Dropdown(
        choices=list(image_infos["wam"].keys()),
        label="Select an Attack",
        value=None,
    )
    # Gallery component to display images
    gallery = gr.Gallery(
        label="Image Gallery",
        columns=4,
        rows=1,
    )

    # Refresh the attack options when the selected model changes
    def update_subcategories(selected_category):
        values = list(image_infos[selected_category].keys())
        values = [(v, v) for v in values]
        return gr.update(choices=values)

    # Load the (image, caption) pairs for the selected model/attack combination
    def load_images(category, subcategory):
        return image_infos.get(category, {}).get(subcategory, [])

    # Update the gallery based on both combo box selections
    model_choice.change(
        fn=update_subcategories, inputs=model_choice, outputs=attack_choice
    )
    attack_choice.change(
        fn=load_images, inputs=[model_choice, attack_choice], outputs=gallery
    )

def audio_examples_tab(abs_path: Path):
    audio_infos = build_audio_infos(abs_path)
    # First combo box (model selection)
    model_choice = gr.Dropdown(
        choices=list(audio_infos.keys()),
        label="Select a Model",
        value=None,
    )
    # Second combo box (attack selection),
    # initialized with the attacks available for the "audioseal" model by default
    attack_choice = gr.Dropdown(
        choices=list(audio_infos["audioseal"].keys()),
        label="Select an Attack",
        value=None,
    )
    # Gallery component to display images
    gallery = gr.Gallery(
        label="Image Gallery", columns=4, rows=1, object_fit="scale-down"
    )

    # Refresh the attack options when the selected model changes
    def update_subcategories(selected_category):
        values = list(audio_infos[selected_category].keys())
        values = [(v, v) for v in values]
        return gr.update(choices=values)

    # Load the (image, caption) pairs for the selected model/attack combination
    def load_audios(category, subcategory):
        return audio_infos.get(category, {}).get(subcategory, [])

    # Update the gallery based on both combo box selections
    model_choice.change(
        fn=update_subcategories, inputs=model_choice, outputs=attack_choice
    )
    attack_choice.change(
        fn=load_audios, inputs=[model_choice, attack_choice], outputs=gallery
    )