Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -12,16 +12,6 @@ from qwenimage.transformer_qwenimage import QwenImageTransformer2DModel
 from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
 
 import math
-from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
-
-from PIL import Image
-import os
-import gradio as gr
-from gradio_client import Client, handle_file
-import tempfile
-from huggingface_hub import InferenceClient
-
 
 # --- Model Loading ---
 dtype = torch.bfloat16
@@ -48,13 +38,13 @@ scheduler = FlowMatchEulerDiscreteScheduler.from_config(scheduler_config)
 
 pipe = QwenImageEditPlusPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", scheduler=scheduler, torch_dtype=dtype)
 
-# Load the
-pipe.load_lora_weights("
-                       weight_name="
+# Load the texture LoRA
+pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
+                       weight_name="apply_texture_qwen_image_edit_2509.safetensors", adapter_name="texture")
 pipe.load_lora_weights("lightx2v/Qwen-Image-Lightning",
                        weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors", adapter_name="lightning")
-pipe.set_adapters(["
-pipe.fuse_lora(adapter_names=["
+pipe.set_adapters(["texture", "lightning"], adapter_weights=[1., 1.])
+pipe.fuse_lora(adapter_names=["texture", "lightning"], lora_scale=1)
 pipe.unload_lora_weights()
 
 pipe.transformer.__class__ = QwenImageTransformer2DModel
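The hunk above swaps the previously loaded LoRA (its name is truncated in this view) for tarn59's texture LoRA, keeping the Lightning LoRA for fast inference. For reference, a minimal sketch of the diffusers multi-adapter pattern the new lines use; the repos, weight names, and adapter names are taken from the diff, and fusing bakes both adapters into the base weights so the LoRA modules can be unloaded afterwards:

import torch
from diffusers import QwenImageEditPlusPipeline

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
)
# Load each LoRA under its own adapter name.
pipe.load_lora_weights(
    "tarn59/apply_texture_qwen_image_edit_2509",
    weight_name="apply_texture_qwen_image_edit_2509.safetensors",
    adapter_name="texture",
)
pipe.load_lora_weights(
    "lightx2v/Qwen-Image-Lightning",
    weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
    adapter_name="lightning",
)
# Activate both adapters at full strength, fuse their weight deltas into
# the base model, then drop the now-redundant LoRA modules.
pipe.set_adapters(["texture", "lightning"], adapter_weights=[1.0, 1.0])
pipe.fuse_lora(adapter_names=["texture", "lightning"], lora_scale=1.0)
pipe.unload_lora_weights()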
@@ -66,657 +56,154 @@ optimize_pipeline_(pipe, image=[Image.new("RGB", (1024, 1024)), Image.new("RGB",
 
 MAX_SEED = np.iinfo(np.int32).max
 
-
-
-
-
-def translate_to_chinese(text: str) -> str:
-    """Translate any language text to Chinese using Qwen API."""
-    if not text or not text.strip():
-        return ""
-
-    # Check if text is already primarily Chinese
-    chinese_chars = sum(1 for char in text if '\u4e00' <= char <= '\u9fff')
-    if chinese_chars / max(len(text), 1) > 0.5:
-        # Already mostly Chinese, return as is
-        return text
-
-    try:
-        completion = translation_client.chat.completions.create(
-            model="Qwen/Qwen3-Next-80B-A3B-Instruct:novita",
-            messages=[
-                {
-                    "role": "system",
-                    "content": "You are a professional translator. Translate the user's text to Chinese. Only output the translated text, nothing else."
-                },
-                {
-                    "role": "user",
-                    "content": f"Translate this to Chinese: {text}"
-                }
-            ],
-            max_tokens=500,
-        )
-
-        translated = completion.choices[0].message.content.strip()
-        print(f"Translated '{text}' to '{translated}'")
-        return translated
-    except Exception as e:
-        print(f"Translation error: {e}")
-        # Fallback to original text if translation fails
-        return text
-
-def _generate_video_segment(input_image_path: str, output_image_path: str, prompt: str, request: gr.Request) -> str:
-    """Generates a single video segment using the external service."""
-    x_ip_token = request.headers['x-ip-token']
-    video_client = Client("multimodalart/wan-2-2-first-last-frame", headers={"x-ip-token": x_ip_token})
-    result = video_client.predict(
-        start_image_pil=handle_file(input_image_path),
-        end_image_pil=handle_file(output_image_path),
-        prompt=prompt, api_name="/generate_video",
-    )
-    return result[0]["video"]
-
-def build_relight_prompt(light_type, light_type_custom, light_direction, light_direction_custom,
-                         light_intensity, light_intensity_custom, illumination_env,
-                         illumination_env_custom, prompt):
-    """Build the relighting prompt based on user selections - Qwen style."""
-
-    # Priority 1: User's prompt (translated to Chinese if needed)
-    if prompt and prompt.strip():
-        translated = translate_to_chinese(prompt)
-        # Add trigger word if not already present
-        if "重新照明" not in translated:
-            return f"重新照明,{translated}"
-        return translated
-
-    # Priority 2: Build from controls
-    prompt_parts = ["重新照明"]
-
-    # Light type descriptions (expanded from IC-Light style but in Chinese)
-    light_descriptions = {
-        "none": "",
-        "soft_window": "窗帘透光(柔和漫射)",
-        "golden_hour": "金色黄昏的温暖光线",
-        "studio": "专业摄影棚的均匀光线",
-        "dramatic": "戏剧性的高对比度光线",
-        "natural": "自然日光",
-        "neon": "霓虹灯光效果",
-        "candlelight": "烛光的温暖氛围",
-        "moonlight": "月光的冷色调",
-        "sunrise": "日出的柔和光线",
-        "sunset_sea": "海面日落光线",
-        "overcast": "阴天的柔和漫射光",
-        "harsh_sun": "强烈的正午阳光",
-        "twilight": "黄昏时分的神秘光线",
-        "aurora": "极光般的多彩光线",
-        "firelight": "篝火的跳动光线",
-        "lightning": "闪电的瞬间强光",
-        "underwater": "水下的柔和蓝光",
-        "foggy": "雾气中的柔和扩散光",
-        "magic": "魔法般的神秘光芒",
-        "cyberpunk": "赛博朋克风格的RGB霓虹光",
-        "warm_home": "家庭温馨的暖色光",
-        "cold_industrial": "冷酷的工业照明",
-        "spotlight": "聚光灯效果",
-        "rim_light": "边缘光效果",
-    }
-
-    # Direction descriptions (from IC-Light)
-    direction_descriptions = {
-        "none": "",
-        "front": "正面照射",
-        "side": "侧面照射",
-        "left": "左侧照射",
-        "right": "右侧照射",
-        "back": "背后照射(逆光)",
-        "top": "上方照射",
-        "bottom": "下方照射",
-        "diagonal": "对角线照射",
-    }
-
-    # Intensity descriptions
-    intensity_descriptions = {
-        "none": "",
-        "soft": "柔和强度",
-        "medium": "中等强度",
-        "strong": "强烈强度",
-    }
-
-    # Illumination environments (from IC-Light vary, translated)
-    illumination_envs = {
-        "none": "",
-        "sunshine_window": "阳光从窗户透入",
-        "neon_city": "霓虹夜景,城市灯光",
-        "sci_fi_rgb": "科幻RGB发光,赛博朋克风格",
-        "warm_bedroom": "温暖氛围,家中,卧室",
-        "magic_lit": "魔法照明",
-        "gothic_cave": "邪恶哥特风格,洞穴中",
-        "light_shadow": "光影交错",
-        "window_shadow": "窗户投影",
-        "soft_studio": "柔和摄影棚灯光",
-        "cozy_bedroom": "家庭氛围,温馨卧室照明",
-        "wong_kar_wai": "王家卫风格霓虹灯,温暖色调",
-        "moonlight_curtains": "月光透过窗帘",
-        "stormy_sky": "暴风雨天空照明",
-        "underwater_glow": "水下发光,深海",
-        "foggy_forest": "雾中森林黎明",
-        "meadow_golden": "草地上的黄金时刻",
-        "rainbow_neon": "彩虹反射,霓虹",
-        "apocalyptic": "末日烟雾氛围",
-        "emergency_red": "红色紧急灯光",
-        "mystical_forest": "神秘发光,魔法森林",
-        "campfire": "篝火光芒",
-        "industrial_harsh": "严酷工业照明",
-        "mountain_sunrise": "山中日出",
-        "desert_evening": "沙漠黄昏",
-        "dark_alley": "黑暗小巷的月光",
-        "fairground": "游乐场的金色光芒",
-        "forest_midnight": "森林深夜",
-        "twilight_purple": "黄昏的紫粉色调",
-        "foggy_morning": "雾蒙蒙的早晨",
-        "rustic_candle": "乡村风格烛光",
-        "office_fluorescent": "办公室荧光灯",
-        "storm_lightning": "暴风雨中的闪电",
-        "fireplace_night": "夜晚壁炉的温暖光芒",
-        "ethereal_magic": "空灵发光,魔法森林",
-        "beach_dusky": "海滩的黄昏",
-        "trees_afternoon": "树林中的午后光线",
-        "urban_blue_neon": "蓝色霓虹灯,城市街道",
-        "rain_police": "雨中红蓝警灯",
-        "aurora_arctic": "极光,北极景观",
-        "foggy_mountains": "雾中山峦日出",
-        "city_skyline": "城市天际线的黄金时刻",
-        "twilight_mist": "神秘黄昏,浓雾",
-        "forest_rays": "森林空地的清晨光线",
-        "festival_lantern": "节日多彩灯笼光",
-        "stained_glass": "彩色玻璃的柔和光芒",
-        "dark_spotlight": "黑暗房间的强烈聚光",
-        "lake_evening": "湖面柔和的黄昏光",
-        "cave_crystal": "洞穴水晶反射",
-        "autumn_forest": "秋林中的鲜艳光线",
-        "snowfall_dusk": "黄昏轻柔降雪",
-        "winter_hazy": "冬日清晨的朦胧光",
-        "rain_city": "雨中城市灯光倒影",
-        "trees_golden_sun": "金色阳光穿过树林",
-        "fireflies_summer": "萤火虫点亮夏夜",
-        "forge_embers": "锻造炉的发光余烬",
-        "gothic_castle": "哥特城堡的昏暗烛光",
-        "starlight_midnight": "午夜明亮星光",
-        "rural_sunset": "乡村的温暖日落",
-        "haunted_flicker": "闹鬼房屋的闪烁灯光",
-        "desert_mirage": "沙漠日落海市蜃楼般的光",
-        "storm_beams": "风暴云中穿透的金色光束",
-    }
-
-    # Build the prompt - Qwen style (comma-separated, Chinese)
-    # Handle custom light type
-    if light_type == "custom" and light_type_custom and light_type_custom.strip():
-        prompt_parts.append(translate_to_chinese(light_type_custom))
-    elif light_type != "none":
-        prompt_parts.append(light_descriptions.get(light_type, ""))
-
-    # Handle custom illumination environment
-    if illumination_env == "custom" and illumination_env_custom and illumination_env_custom.strip():
-        prompt_parts.append(translate_to_chinese(illumination_env_custom))
-    elif illumination_env != "none":
-        prompt_parts.append(illumination_envs.get(illumination_env, ""))
-
-    # Handle custom light direction
-    if light_direction == "custom" and light_direction_custom and light_direction_custom.strip():
-        prompt_parts.append(translate_to_chinese(light_direction_custom))
-    elif light_direction != "none":
-        prompt_parts.append(direction_descriptions.get(light_direction, ""))
+def calculate_dimensions(image):
+    """Calculate output dimensions based on content image, keeping largest side at 1024."""
+    if image is None:
+        return 1024, 1024
 
-
-    if light_intensity == "custom" and light_intensity_custom and light_intensity_custom.strip():
-        prompt_parts.append(translate_to_chinese(light_intensity_custom))
-    elif light_intensity != "none":
-        prompt_parts.append(intensity_descriptions.get(light_intensity, ""))
+    original_width, original_height = image.size
 
-
+    if original_width > original_height:
+        new_width = 1024
+        aspect_ratio = original_height / original_width
+        new_height = int(new_width * aspect_ratio)
+    else:
+        new_height = 1024
+        aspect_ratio = original_width / original_height
+        new_width = int(new_height * aspect_ratio)
 
-    #
-
-
+    # Ensure dimensions are multiples of 8
+    new_width = (new_width // 8) * 8
+    new_height = (new_height // 8) * 8
 
-    return
-
+    return new_width, new_height
 
 @spaces.GPU
-def
-
-
-    light_type_custom,
-    light_direction,
-    light_direction_custom,
-    light_intensity,
-    light_intensity_custom,
-    illumination_env,
-    illumination_env_custom,
+def apply_texture(
+    content_image,
+    texture_image,
     prompt,
-    seed,
-    randomize_seed,
-    true_guidance_scale,
-    num_inference_steps,
-    height,
-    width,
-    prev_output = None,
+    seed=42,
+    randomize_seed=False,
+    true_guidance_scale=False,
+    num_inference_steps=4,
     progress=gr.Progress(track_tqdm=True)
 ):
-
-
-
-
-
-
+    if content_image is None:
+        raise gr.Error("Please upload a content image.")
+    if texture_image is None:
+        raise gr.Error("Please upload a texture image.")
+    if not prompt or not prompt.strip():
+        raise gr.Error("Please provide a description.")
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device=device).manual_seed(seed)
-
-    #
-
-
-
-
-
-
-
-
-
-    if len(pil_images) == 0:
-        raise gr.Error("Please upload an image first.")
-
+
+    # Calculate dimensions based on content image
+    width, height = calculate_dimensions(content_image)
+
+    # Prepare images
+    content_pil = content_image.convert("RGB") if isinstance(content_image, Image.Image) else Image.open(content_image.name).convert("RGB")
+    texture_pil = texture_image.convert("RGB") if isinstance(texture_image, Image.Image) else Image.open(texture_image.name).convert("RGB")
+
+    pil_images = [content_pil, texture_pil]
+
     result = pipe(
         image=pil_images,
-        prompt=
-        height=height
-        width=width
+        prompt=prompt,
+        height=height,
+        width=width,
         num_inference_steps=num_inference_steps,
         generator=generator,
         true_cfg_scale=true_guidance_scale,
         num_images_per_prompt=1,
     ).images[0]
-
-    return result, seed, final_prompt
-
-def create_video_between_images(input_image, output_image, prompt: str, request: gr.Request) -> str:
-    """Create a video between the input and output images."""
-    if input_image is None or output_image is None:
-        raise gr.Error("Both input and output images are required to create a video.")
 
-
-
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
-        input_image.save(tmp.name)
-        input_image_path = tmp.name
-
-    output_pil = Image.fromarray(output_image.astype('uint8'))
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
-        output_pil.save(tmp.name)
-        output_image_path = tmp.name
-
-    video_path = _generate_video_segment(
-        input_image_path,
-        output_image_path,
-        prompt if prompt else "Relighting transformation",
-        request
-    )
-    return video_path
-    except Exception as e:
-        raise gr.Error(f"Video generation failed: {e}")
-
+    return result, seed
 
 # --- UI ---
 css = '''
-#col-container { max-width:
+#col-container { max-width: 800px; margin: 0 auto; }
 .dark .progress-text{color: white !important}
-#examples{max-width:
-.radio-group {display: grid; grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); gap: 8px;}
-.radio-group [data-testid="block-info"] { display: none !important }
+#examples{max-width: 800px; margin: 0 auto; }
 '''
 
-def reset_all():
-    return ["none", "", "none", "", "none", "", "none", "", "", False]
-
-def end_reset():
-    return False
-
-def update_dimensions_on_upload(image):
-    if image is None:
-        return 1024, 1024
-
-    original_width, original_height = image.size
-
-    if original_width > original_height:
-        new_width = 1024
-        aspect_ratio = original_height / original_width
-        new_height = int(new_width * aspect_ratio)
-    else:
-        new_height = 1024
-        aspect_ratio = original_width / original_height
-        new_width = int(new_height * aspect_ratio)
-
-    # Ensure dimensions are multiples of 8
-    new_width = (new_width // 8) * 8
-    new_height = (new_height // 8) * 8
-
-    return new_width, new_height
-
-def toggle_custom_textbox(choice):
-    """Show textbox when Custom is selected"""
-    return gr.update(visible=(choice == "custom"))
-
-
 with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("
+        gr.Markdown("# Apply Texture — Qwen Image Edit")
         gr.Markdown("""
-        Qwen
-
-
-
+        Using [tarn59's Apply-Texture-Qwen-Image-Edit-2509 LoRA](https://huggingface.co/tarn59/apply_texture_qwen_image_edit_2509)
+        and [lightx2v/Qwen-Image-Lightning](https://huggingface.co/lightx2v/Qwen-Image-Lightning) for 4-step inference 💨
+        """)
+
         with gr.Row():
-            with gr.Column(
-                image = gr.Image(label="Input Image", type="pil")
-                prev_output = gr.Image(value=None, visible=False)
-                is_reset = gr.Checkbox(value=False, visible=False)
-
-                with gr.Tab("Compose Prompt"):
-                    with gr.Accordion("💡 Light Type", open=True):
-                        light_type = gr.Radio(
-                            choices=[
-                                ("None", "none"),
-                                ("Soft Window Light", "soft_window"),
-                                ("Golden Hour", "golden_hour"),
-                                ("Studio Lighting", "studio"),
-                                ("Dramatic", "dramatic"),
-                                ("Natural Daylight", "natural"),
-                                ("Neon", "neon"),
-                                ("Candlelight", "candlelight"),
-                                ("Moonlight", "moonlight"),
-                                ("Sunrise", "sunrise"),
-                                ("Sunset over Sea", "sunset_sea"),
-                                ("Overcast", "overcast"),
-                                ("Harsh Sunlight", "harsh_sun"),
-                                ("Twilight", "twilight"),
-                                ("Aurora", "aurora"),
-                                ("Firelight", "firelight"),
-                                ("Lightning", "lightning"),
-                                ("Underwater", "underwater"),
-                                ("Foggy", "foggy"),
-                                ("Magic Light", "magic"),
-                                ("Cyberpunk", "cyberpunk"),
-                                ("Warm Home", "warm_home"),
-                                ("Cold Industrial", "cold_industrial"),
-                                ("Spotlight", "spotlight"),
-                                ("Rim Light", "rim_light"),
-                                ("Custom", "custom"),
-                            ],
-                            value="none",
-                            elem_classes="radio-group"
-                        )
-                        light_type_custom = gr.Textbox(
-                            label="Custom Light Type",
-                            placeholder="e.g., Bioluminescent glow, Laser light show, etc.",
-                            visible=False
-                        )
-
-                    with gr.Accordion("🧭 Light Direction", open=True):
-                        light_direction = gr.Radio(
-                            choices=[
-                                ("None", "none"),
-                                ("Front", "front"),
-                                ("Side", "side"),
-                                ("Left", "left"),
-                                ("Right", "right"),
-                                ("Back (Backlight)", "back"),
-                                ("Top", "top"),
-                                ("Bottom", "bottom"),
-                                ("Diagonal", "diagonal"),
-                                ("Custom", "custom"),
-                            ],
-                            value="none",
-                            elem_classes="radio-group"
-                        )
-                        light_direction_custom = gr.Textbox(
-                            label="Custom Light Direction",
-                            placeholder="e.g., From 45 degrees above left, Rotating around subject, etc.",
-                            visible=False
-                        )
-
-                    with gr.Accordion("⚡ Light Intensity", open=True):
-                        light_intensity = gr.Radio(
-                            choices=[
-                                ("None", "none"),
-                                ("Soft", "soft"),
-                                ("Medium", "medium"),
-                                ("Strong", "strong"),
-                                ("Custom", "custom"),
-                            ],
-                            value="none",
-                            elem_classes="radio-group"
-                        )
-                        light_intensity_custom = gr.Textbox(
-                            label="Custom Light Intensity",
-                            placeholder="e.g., Very dim, Blinding bright, Pulsating, etc.",
-                            visible=False
-                        )
-
-                    with gr.Accordion("🌍 Illumination Environment", open=False):
-                        illumination_env = gr.Radio(
-                            choices=[
-                                ("None", "none"),
-                                ("Sunshine from Window", "sunshine_window"),
-                                ("Neon Night, City", "neon_city"),
-                                ("Sci-Fi RGB Glowing, Cyberpunk", "sci_fi_rgb"),
-                                ("Warm Atmosphere, at Home, Bedroom", "warm_bedroom"),
-                                ("Magic Lit", "magic_lit"),
-                                ("Evil, Gothic, in a Cave", "gothic_cave"),
-                                ("Light and Shadow", "light_shadow"),
-                                ("Shadow from Window", "window_shadow"),
-                                ("Soft Studio Lighting", "soft_studio"),
-                                ("Home Atmosphere, Cozy Bedroom", "cozy_bedroom"),
-                                ("Neon, Wong Kar-wai, Warm", "wong_kar_wai"),
-                                ("Moonlight through Curtains", "moonlight_curtains"),
-                                ("Stormy Sky Lighting", "stormy_sky"),
-                                ("Underwater Glow, Deep Sea", "underwater_glow"),
-                                ("Foggy Forest at Dawn", "foggy_forest"),
-                                ("Golden Hour in a Meadow", "meadow_golden"),
-                                ("Rainbow Reflections, Neon", "rainbow_neon"),
-                                ("Apocalyptic, Smoky Atmosphere", "apocalyptic"),
-                                ("Red Glow, Emergency Lights", "emergency_red"),
-                                ("Mystical Glow, Enchanted Forest", "mystical_forest"),
-                                ("Campfire Light", "campfire"),
-                                ("Harsh, Industrial Lighting", "industrial_harsh"),
-                                ("Sunrise in the Mountains", "mountain_sunrise"),
-                                ("Evening Glow in the Desert", "desert_evening"),
-                                ("Moonlight in a Dark Alley", "dark_alley"),
-                                ("Golden Glow at a Fairground", "fairground"),
-                                ("Midnight in the Forest", "forest_midnight"),
-                                ("Purple and Pink Hues at Twilight", "twilight_purple"),
-                                ("Foggy Morning, Muted Light", "foggy_morning"),
-                                ("Candle-lit Room, Rustic Vibe", "rustic_candle"),
-                                ("Fluorescent Office Lighting", "office_fluorescent"),
-                                ("Lightning Flash in Storm", "storm_lightning"),
-                                ("Night, Cozy Warm Light from Fireplace", "fireplace_night"),
-                                ("Ethereal Glow, Magical Forest", "ethereal_magic"),
-                                ("Dusky Evening on a Beach", "beach_dusky"),
-                                ("Afternoon Light Filtering through Trees", "trees_afternoon"),
-                                ("Blue Neon Light, Urban Street", "urban_blue_neon"),
-                                ("Red and Blue Police Lights in Rain", "rain_police"),
-                                ("Aurora Borealis Glow, Arctic Landscape", "aurora_arctic"),
-                                ("Sunrise through Foggy Mountains", "foggy_mountains"),
-                                ("Golden Hour on a City Skyline", "city_skyline"),
-                                ("Mysterious Twilight, Heavy Mist", "twilight_mist"),
-                                ("Early Morning Rays, Forest Clearing", "forest_rays"),
-                                ("Colorful Lantern Light at Festival", "festival_lantern"),
-                                ("Soft Glow through Stained Glass", "stained_glass"),
-                                ("Harsh Spotlight in Dark Room", "dark_spotlight"),
-                                ("Mellow Evening Glow on a Lake", "lake_evening"),
-                                ("Crystal Reflections in a Cave", "cave_crystal"),
-                                ("Vibrant Autumn Lighting in a Forest", "autumn_forest"),
-                                ("Gentle Snowfall at Dusk", "snowfall_dusk"),
-                                ("Hazy Light of a Winter Morning", "winter_hazy"),
-                                ("Rain-soaked Reflections in City Lights", "rain_city"),
-                                ("Golden Sunlight Streaming through Trees", "trees_golden_sun"),
-                                ("Fireflies Lighting up a Summer Night", "fireflies_summer"),
-                                ("Glowing Embers from a Forge", "forge_embers"),
-                                ("Dim Candlelight in a Gothic Castle", "gothic_castle"),
-                                ("Midnight Sky with Bright Starlight", "starlight_midnight"),
-                                ("Warm Sunset in a Rural Village", "rural_sunset"),
-                                ("Flickering Light in a Haunted House", "haunted_flicker"),
-                                ("Desert Sunset with Mirage-like Glow", "desert_mirage"),
-                                ("Golden Beams Piercing through Storm Clouds", "storm_beams"),
-                                ("Custom", "custom"),
-                            ],
-                            value="none",
-                            elem_classes="radio-group"
-                        )
-                        illumination_env_custom = gr.Textbox(
-                            label="Custom Illumination Environment",
-                            placeholder="e.g., Inside a crystal palace, Underwater volcano, etc.",
-                            visible=False
-                        )
-
-                with gr.Tab("Custom Prompt"):
-                    with gr.Accordion("✍️ Custom Prompt (in any language)", open=False):
-                        prompt = gr.Textbox(
-                            placeholder="Example: Add warm sunset lighting from the right",
-                            lines=3
-                        )
-
+            with gr.Column():
                 with gr.Row():
-
-
-
+                    content_image = gr.Image(label="Content", type="pil")
+                    texture_image = gr.Image(label="Texture", type="pil")
+
+                prompt = gr.Textbox(
+                    label="Describe",
+                    info="Apply ... texture to ...",
+                    placeholder="Apply wood siding texture to building walls."
+                )
+
+                button = gr.Button("✨ Generate", variant="primary")
+
with gr.Accordion("⚙️ Advanced Settings", open=False):
|
| 593 |
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
|
| 594 |
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
|
| 595 |
-
true_guidance_scale = gr.Slider(
|
| 596 |
-
|
| 597 |
-
|
| 598 |
-
|
| 599 |
-
|
| 600 |
-
|
| 601 |
-
|
| 602 |
-
|
| 603 |
-
|
| 604 |
-
|
| 605 |
-
|
| 606 |
-
|
| 607 |
-
|
| 608 |
-
|
| 609 |
-
|
| 610 |
-
|
| 611 |
-
|
| 612 |
-
|
| 613 |
-
|
| 614 |
-
|
| 615 |
-
|
| 616 |
-
|
| 617 |
-
|
| 618 |
-
|
| 619 |
-
|
| 620 |
-
|
| 621 |
-
|
| 622 |
-
|
| 623 |
-
|
| 624 |
-
|
| 625 |
-
|
| 626 |
-
|
| 627 |
-
|
| 628 |
-
|
| 629 |
-
|
| 630 |
-
|
| 631 |
-
|
| 632 |
-
|
| 633 |
-
|
| 634 |
-
|
| 635 |
-
|
| 636 |
-
|
| 637 |
-
|
| 638 |
-
|
| 639 |
-
|
| 640 |
-
|
| 641 |
-
|
| 642 |
-
|
| 643 |
-
|
| 644 |
-
create_video_button.click(
|
| 645 |
-
fn=lambda: gr.update(visible=True),
|
| 646 |
-
outputs=[video_group],
|
| 647 |
-
api_name=False
|
| 648 |
-
).then(
|
| 649 |
-
fn=create_video_between_images,
|
| 650 |
-
inputs=[image, result, prompt_preview],
|
| 651 |
-
outputs=[video_output],
|
| 652 |
-
api_name=False
|
| 653 |
-
)
|
| 654 |
-
|
| 655 |
-
# Examples
|
| 656 |
-
gr.Examples(
|
| 657 |
-
examples=[
|
| 658 |
-
["harold.png", "dramatic", "", "side", "", "soft", "", "none", "", "", 0, True, 1.0, 4, 672, 1024],
|
| 659 |
-
["distracted.png", "golden_hour", "", "side", "", "strong", "", "none", "", "", 0, True, 1.0, 4, 640, 1024],
|
| 660 |
-
["disaster.jpg", "moonlight", "", "front", "", "medium", "", "neon_city", "", "", 0, True, 1.0, 4, 640, 1024],
|
| 661 |
-
],
|
| 662 |
-
inputs=[image, light_type, light_type_custom, light_direction, light_direction_custom,
|
| 663 |
-
light_intensity, light_intensity_custom, illumination_env, illumination_env_custom,
|
| 664 |
-
prompt, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width],
|
| 665 |
-
outputs=outputs,
|
| 666 |
-
fn=infer_relight,
|
| 667 |
-
cache_examples="lazy",
|
| 668 |
-
elem_id="examples"
|
| 669 |
-
)
|
| 670 |
-
|
| 671 |
-
# Image upload triggers dimension update and control reset
|
| 672 |
-
image.upload(
|
| 673 |
-
fn=update_dimensions_on_upload,
|
| 674 |
-
inputs=[image],
|
| 675 |
-
outputs=[width, height]
|
| 676 |
-
).then(
|
| 677 |
-
fn=reset_all,
|
| 678 |
-
inputs=None,
|
| 679 |
-
outputs=[light_type, light_type_custom, light_direction, light_direction_custom,
|
| 680 |
-
light_intensity, light_intensity_custom, illumination_env, illumination_env_custom,
|
| 681 |
-
prompt, is_reset],
|
| 682 |
-
queue=False
|
| 683 |
-
).then(
|
| 684 |
-
fn=end_reset,
|
| 685 |
-
inputs=None,
|
| 686 |
-
outputs=[is_reset],
|
| 687 |
-
queue=False
|
| 688 |
-
)
|
| 689 |
-
|
| 690 |
-
|
| 691 |
-
# Live updates - only trigger on non-custom radio selections
|
| 692 |
-
def maybe_infer(is_reset, progress=gr.Progress(track_tqdm=True), *args):
|
| 693 |
-
if is_reset:
|
| 694 |
-
return gr.update(), gr.update(), gr.update(), gr.update()
|
| 695 |
-
else:
|
| 696 |
-
result_img, result_seed, result_prompt = infer_relight(*args)
|
| 697 |
-
# Show video button if we have both input and output
|
| 698 |
-
show_button = args[0] is not None and result_img is not None
|
| 699 |
-
return result_img, result_seed, result_prompt, gr.update(visible=show_button)
|
| 700 |
-
|
| 701 |
-
control_inputs = [
|
| 702 |
-
image, light_type, light_type_custom, light_direction, light_direction_custom,
|
| 703 |
-
light_intensity, light_intensity_custom, illumination_env, illumination_env_custom,
|
| 704 |
-
prompt, seed, randomize_seed, true_guidance_scale, num_inference_steps, height, width, prev_output
|
| 705 |
-
]
|
| 706 |
-
control_inputs_with_flag = [is_reset] + control_inputs
|
| 707 |
-
|
| 708 |
-
# Only trigger live updates when selecting non-custom options
|
| 709 |
-
def should_trigger_infer(choice):
|
| 710 |
-
return choice != "custom"
|
| 711 |
-
|
| 712 |
-
for control in [light_type, light_direction, light_intensity, illumination_env]:
|
| 713 |
-
control.input(
|
| 714 |
-
fn=lambda choice, is_reset_val, *args, progress=gr.Progress(track_tqdm=True):
|
| 715 |
-
maybe_infer(is_reset_val, progress, *args) if should_trigger_infer(choice) else (gr.update(), gr.update(), gr.update(), gr.update()),
|
| 716 |
-
inputs=[control, is_reset] + control_inputs, # Pass control separately, then is_reset, then the rest
|
| 717 |
-
outputs=outputs + [create_video_button]
|
| 718 |
)
|
| 719 |
-
|
| 720 |
-
run_event.then(lambda img, *_: img, inputs=[result], outputs=[prev_output])
|
| 721 |
|
| 722 |
-
|
|
|
|
|
|
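Taken together, the new code path validates the two uploads, derives the output size from the content image, and runs the fused pipeline on the [content, texture] pair. A minimal end-to-end sketch under the assumptions above (reusing the `pipe` from the earlier LoRA sketch, assuming a CUDA device, and using hypothetical local files in place of the Gradio uploads):

from PIL import Image
import torch

# Hypothetical local files standing in for the Space's upload widgets.
content = Image.open("coffee_mug.png").convert("RGB")
texture = Image.open("wood_boxes.png").convert("RGB")

# Same sizing rule as the new calculate_dimensions(): the longest side
# becomes 1024, the other keeps the aspect ratio, and both snap down to
# multiples of 8 (e.g. 1600x900 -> 1024x576).
w, h = content.size
if w > h:
    w, h = 1024, int(1024 * h / w)
else:
    w, h = int(1024 * w / h), 1024
w, h = (w // 8) * 8, (h // 8) * 8

generator = torch.Generator(device="cuda").manual_seed(42)
result = pipe(
    image=[content, texture],   # content first, texture second
    prompt="Apply wood texture to mug",
    height=h,
    width=w,
    num_inference_steps=4,      # the Lightning LoRA is tuned for 4 steps
    generator=generator,
    true_cfg_scale=1.0,
    num_images_per_prompt=1,
).images[0]
result.save("textured_mug.png")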