akhaliq (HF Staff) committed
Commit d0811b3 · 1 Parent(s): 9140b33

add image to image feature

Files changed (1)
  1. app.py +281 -57

app.py CHANGED

@@ -917,6 +917,76 @@ def generate_image_with_qwen(prompt: str, image_index: int = 0) -> str:
917
  print(f"Image generation error: {str(e)}")
918
  return f"Error generating image: {str(e)}"
919
 
920
+ def generate_image_to_image(input_image_data, prompt: str) -> str:
921
+ """Generate an image using image-to-image with FLUX.1-Kontext-dev via Hugging Face InferenceClient.
922
+
923
+ Returns an HTML <img> tag with optimized base64 JPEG data, similar to text-to-image output.
924
+ """
925
+ try:
926
+ # Check token
927
+ if not os.getenv('HF_TOKEN'):
928
+ return "Error: HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token."
929
+
930
+ # Prepare client
931
+ client = InferenceClient(
932
+ provider="auto",
933
+ api_key=os.getenv('HF_TOKEN'),
934
+ bill_to="huggingface",
935
+ )
936
+
937
+ # Normalize input image to bytes
938
+ import io
939
+ from PIL import Image
940
+ try:
941
+ import numpy as np
942
+ except Exception:
943
+ np = None
944
+
945
+ if hasattr(input_image_data, 'read'):
946
+ # File-like object
947
+ raw = input_image_data.read()
948
+ pil_image = Image.open(io.BytesIO(raw))
949
+ elif hasattr(input_image_data, 'mode') and hasattr(input_image_data, 'size'):
950
+ # PIL Image
951
+ pil_image = input_image_data
952
+ elif np is not None and isinstance(input_image_data, np.ndarray):
953
+ pil_image = Image.fromarray(input_image_data)
954
+ elif isinstance(input_image_data, (bytes, bytearray)):
955
+ pil_image = Image.open(io.BytesIO(input_image_data))
956
+ else:
957
+ # Fallback: try to convert via bytes
958
+ pil_image = Image.open(io.BytesIO(bytes(input_image_data)))
959
+
960
+ # Ensure RGB
961
+ if pil_image.mode != 'RGB':
962
+ pil_image = pil_image.convert('RGB')
963
+
964
+ buf = io.BytesIO()
965
+ pil_image.save(buf, format='PNG')
966
+ input_bytes = buf.getvalue()
967
+
968
+ # Call image-to-image
969
+ image = client.image_to_image(
970
+ input_bytes,
971
+ prompt=prompt,
972
+ model="black-forest-labs/FLUX.1-Kontext-dev",
973
+ )
974
+
975
+ # Resize/optimize
976
+ max_size = 512
977
+ if image.width > max_size or image.height > max_size:
978
+ image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
979
+
980
+ out_buf = io.BytesIO()
981
+ image.convert('RGB').save(out_buf, format='JPEG', quality=85, optimize=True)
982
+
983
+ import base64
984
+ img_str = base64.b64encode(out_buf.getvalue()).decode()
985
+ return f"<img src=\"data:image/jpeg;base64,{img_str}\" alt=\"{prompt}\" style=\"max-width: 100%; height: auto; border-radius: 8px; margin: 10px 0;\" loading=\"lazy\" />"
986
+ except Exception as e:
987
+ print(f"Image-to-image generation error: {str(e)}")
988
+ return f"Error generating image (image-to-image): {str(e)}"
989
+
990
  def extract_image_prompts_from_text(text: str, num_images_needed: int = 1) -> list:
991
  """Extract image generation prompts from the full text based on number of images needed"""
992
  # Use the entire text as the base prompt for image generation

@@ -1061,25 +1131,132 @@ def create_image_replacement_blocks(html_content: str, user_prompt: str) -> str:
1131
 
1132
  return '\n\n'.join(replacement_blocks)
1133
 
1134
+ def create_image_replacement_blocks_from_input_image(html_content: str, user_prompt: str, input_image_data, max_images: int = 1) -> str:
1135
+ """Create search/replace blocks using image-to-image generation with a provided input image.
1136
+
1137
+ Mirrors placeholder detection from create_image_replacement_blocks but uses generate_image_to_image.
1138
+ """
1139
+ if not user_prompt:
1140
+ return ""
1141
+
1142
+ import re
1143
+
1144
+ placeholder_patterns = [
1145
+ r'<img[^>]*src=["\'](?:placeholder|dummy|sample|example)[^"\']*["\'][^>]*>',
1146
+ r'<img[^>]*src=["\']https?://via\.placeholder\.com[^"\']*["\'][^>]*>',
1147
+ r'<img[^>]*src=["\']https?://picsum\.photos[^"\']*["\'][^>]*>',
1148
+ r'<img[^>]*src=["\']https?://dummyimage\.com[^"\']*["\'][^>]*>',
1149
+ r'<img[^>]*alt=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
1150
+ r'<img[^>]*class=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
1151
+ r'<img[^>]*id=["\'][^"\']*placeholder[^"\']*["\'][^>]*>',
1152
+ r'<img[^>]*src=["\']data:image[^"\']*["\'][^>]*>',
1153
+ r'<img[^>]*src=["\']#["\'][^>]*>',
1154
+ r'<img[^>]*src=["\']about:blank["\'][^>]*>',
1155
+ ]
1156
+
1157
+ placeholder_images = []
1158
+ for pattern in placeholder_patterns:
1159
+ matches = re.findall(pattern, html_content, re.IGNORECASE)
1160
+ placeholder_images.extend(matches)
1161
+
1162
+ if not placeholder_images:
1163
+ img_pattern = r'<img[^>]*>'
1164
+ placeholder_images = re.findall(img_pattern, html_content)
1165
+
1166
+ div_placeholder_patterns = [
1167
+ r'<div[^>]*class=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
1168
+ r'<div[^>]*id=["\'][^"\']*(?:image|img|photo|picture)[^"\']*["\'][^>]*>.*?</div>',
1169
+ ]
1170
+ for pattern in div_placeholder_patterns:
1171
+ matches = re.findall(pattern, html_content, re.IGNORECASE | re.DOTALL)
1172
+ placeholder_images.extend(matches)
1173
+
1174
+ num_images_needed = len(placeholder_images)
1175
+ num_to_replace = min(num_images_needed, max(0, int(max_images)))
1176
+ if num_images_needed == 0:
1177
+ # No placeholders; generate one image to append (only if at least one upload is present)
1178
+ if num_to_replace <= 0:
1179
+ return ""
1180
+ prompts = extract_image_prompts_from_text(user_prompt, 1)
1181
+ if not prompts:
1182
+ return ""
1183
+ image_html = generate_image_to_image(input_image_data, prompts[0])
1184
+ if image_html.startswith("Error"):
1185
+ return ""
1186
+ return f"{SEARCH_START}\n\n{DIVIDER}\n<div class=\"generated-images\">{image_html}</div>\n{REPLACE_END}"
1187
+
1188
+ if num_to_replace <= 0:
1189
+ return ""
1190
+ image_prompts = extract_image_prompts_from_text(user_prompt, num_to_replace)
1191
+
1192
+ generated_images = []
1193
+ for i, prompt in enumerate(image_prompts):
1194
+ image_html = generate_image_to_image(input_image_data, prompt)
1195
+ if not image_html.startswith("Error"):
1196
+ generated_images.append((i, image_html))
1197
+
1198
+ if not generated_images:
1199
+ return ""
1200
+
1201
+ replacement_blocks = []
1202
+ for i, (prompt_index, generated_image) in enumerate(generated_images):
1203
+ if i < num_to_replace and i < len(placeholder_images):
1204
+ placeholder = placeholder_images[i]
1205
+ placeholder_clean = re.sub(r'\s+', ' ', placeholder.strip())
1206
+ placeholder_variations = [
1207
+ placeholder_clean,
1208
+ placeholder_clean.replace('"', "'"),
1209
+ placeholder_clean.replace("'", '"'),
1210
+ re.sub(r'\s+', ' ', placeholder_clean),
1211
+ placeholder_clean.replace(' ', ' '),
1212
+ ]
1213
+ for variation in placeholder_variations:
1214
+ replacement_blocks.append(f"""{SEARCH_START}
1215
+ {variation}
1216
+ {DIVIDER}
1217
+ {generated_image}
1218
+ {REPLACE_END}""")
1219
+ # Do not insert additional images beyond the uploaded count
1220
+
1221
+ return '\n\n'.join(replacement_blocks)
1222
+
1223
+ def apply_generated_images_to_html(html_content: str, user_prompt: str, enable_text_to_image: bool, enable_image_to_image: bool, input_image_data, image_to_image_prompt: str | None = None, text_to_image_prompt: str | None = None) -> str:
1224
+ """Apply text-to-image and/or image-to-image replacements to HTML content.
1225
+
1226
+ If both toggles are enabled, text-to-image replacements run first, then image-to-image.
1227
+ """
1228
+ result = html_content
1229
+ try:
1230
+ # If an input image is provided and image-to-image is enabled, we only replace one image
1231
+ # and skip text-to-image to satisfy the requirement to replace exactly the number of uploaded images.
1232
+ if enable_image_to_image and input_image_data is not None and (result.strip().startswith('<!DOCTYPE html>') or result.strip().startswith('<html')):
1233
+ # Prefer the dedicated image-to-image prompt if provided
1234
+ i2i_prompt = (image_to_image_prompt or user_prompt or "").strip()
1235
+ blocks2 = create_image_replacement_blocks_from_input_image(result, i2i_prompt, input_image_data, max_images=1)
1236
+ if blocks2:
1237
+ result = apply_search_replace_changes(result, blocks2)
1238
+ return result
1239
+
1240
+ if enable_text_to_image and (result.strip().startswith('<!DOCTYPE html>') or result.strip().startswith('<html')):
1241
+ t2i_prompt = (text_to_image_prompt or user_prompt or "").strip()
1242
+ blocks = create_image_replacement_blocks(result, t2i_prompt)
1243
+ if blocks:
1244
+ result = apply_search_replace_changes(result, blocks)
1245
+ except Exception:
1246
+ return html_content
1247
+ return result
1248
+
1249
  def create_multimodal_message(text, image=None):
1065
- """Create a multimodal message with text and optional image"""
1250
+ """Create a chat message. For broad provider compatibility, always return content as a string.
1251
+
1252
+ Some providers (e.g., Hugging Face router endpoints like Cerebras) expect `content` to be a string,
1253
+ not a list of typed parts. To avoid 422 validation errors, we inline a brief note when an image is provided.
1254
+ """
1255
  if image is None:
1256
  return {"role": "user", "content": text}
1068
-
1069
- content = [
1070
- {
1071
- "type": "text",
1072
- "text": text
1073
- },
1074
- {
1075
- "type": "image_url",
1076
- "image_url": {
1077
- "url": process_image_for_model(image)
1078
- }
1079
- }
1080
- ]
1081
-
1082
- return {"role": "user", "content": content}
1257
+ # Keep providers happy: avoid structured multimodal payloads; add a short note instead
1258
+ # If needed, this can be enhanced per-model with proper multimodal schemas.
1259
+ return {"role": "user", "content": f"{text}\n\n[An image was provided as reference.]"}
1260
 
1261
  def apply_search_replace_changes(original_content: str, changes_text: str) -> str:
1262
  """Apply search/replace changes to content (HTML, Python, etc.)"""

@@ -1733,7 +1910,7 @@ The HTML code above contains the complete original website structure with all im
1910
  stop_generation = False
1911
 
1912

1736
- def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False, language: str = "html", provider: str = "auto", enable_image_generation: bool = False):
1913
+ def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False, language: str = "html", provider: str = "auto", enable_image_generation: bool = False, enable_image_to_image: bool = False, image_to_image_prompt: Optional[str] = None, text_to_image_prompt: Optional[str] = None):
1914
  if query is None:
1915
  query = ''
1916
  if _history is None:

@@ -1850,14 +2027,15 @@ This will help me create a better design for you."""
2027
 
2028
  clean_code = remove_code_block(content)
2029

1853
- # Apply image generation if enabled and this is HTML content
1854
- final_content = content
1855
- if enable_image_generation and language == "html" and (clean_code.strip().startswith('<!DOCTYPE html>') or clean_code.strip().startswith('<html')):
1856
- # Create search/replace blocks for image replacement based on images found in code
1857
- image_replacement_blocks = create_image_replacement_blocks(content, query)
1858
- if image_replacement_blocks:
1859
- # Apply the image replacements using existing search/replace logic
1860
- final_content = apply_search_replace_changes(content, image_replacement_blocks)
2030
+ # Apply image generation (text→image and/or image→image)
2031
+ final_content = apply_generated_images_to_html(
2032
+ content,
2033
+ query,
2034
+ enable_text_to_image=enable_image_generation,
2035
+ enable_image_to_image=enable_image_to_image,
2036
+ input_image_data=image,
2037
+ image_to_image_prompt=image_to_image_prompt,
2038
+ )
2039
 
2040
  _history.append([query, final_content])
2041
 

@@ -2010,13 +2188,15 @@ This will help me create a better design for you."""
2188
  modified_content = apply_search_replace_changes(last_content, clean_code)
2189
  clean_content = remove_code_block(modified_content)
2190

2013
- # Apply image generation if enabled and this is HTML content
2014
- if enable_image_generation and language == "html" and (clean_content.strip().startswith('<!DOCTYPE html>') or clean_content.strip().startswith('<html')):
2015
- # Create search/replace blocks for image replacement based on images found in code
2016
- image_replacement_blocks = create_image_replacement_blocks(clean_content, query)
2017
- if image_replacement_blocks:
2018
- # Apply the image replacements using existing search/replace logic
2019
- clean_content = apply_search_replace_changes(clean_content, image_replacement_blocks)
2191
+ # Apply image generation (text→image and/or image→image)
2192
+ clean_content = apply_generated_images_to_html(
2193
+ clean_content,
2194
+ query,
2195
+ enable_text_to_image=enable_image_generation,
2196
+ enable_image_to_image=enable_image_to_image,
2197
+ input_image_data=image,
2198
+ image_to_image_prompt=image_to_image_prompt,
2199
+ )
2200
 
2201
  yield {
2202
  code_output: clean_content,

@@ -2025,14 +2205,16 @@ This will help me create a better design for you."""
2205
  history_output: history_to_chatbot_messages(_history),
2206
  }
2207
  else:
2028
- # Apply image generation if enabled and this is HTML content
2029
- final_content = clean_code
2030
- if enable_image_generation and language == "html" and (final_content.strip().startswith('<!DOCTYPE html>') or final_content.strip().startswith('<html')):
2031
- # Create search/replace blocks for image replacement based on images found in code
2032
- image_replacement_blocks = create_image_replacement_blocks(final_content, query)
2033
- if image_replacement_blocks:
2034
- # Apply the image replacements using existing search/replace logic
2035
- final_content = apply_search_replace_changes(final_content, image_replacement_blocks)
2208
+ # Apply image generation (text→image and/or image→image)
2209
+ final_content = apply_generated_images_to_html(
2210
+ clean_code,
2211
+ query,
2212
+ enable_text_to_image=enable_image_generation,
2213
+ enable_image_to_image=enable_image_to_image,
2214
+ input_image_data=image,
2215
+ image_to_image_prompt=image_to_image_prompt,
2216
+ text_to_image_prompt=text_to_image_prompt,
2217
+ )
2218
 
2219
  yield {
2220
  code_output: final_content,

@@ -2245,13 +2427,16 @@ This will help me create a better design for you."""
2427
  modified_content = apply_search_replace_changes(last_content, final_code)
2428
  clean_content = remove_code_block(modified_content)
2429

2248
- # Apply image generation if enabled and this is HTML content
2249
- if enable_image_generation and language == "html" and (clean_content.strip().startswith('<!DOCTYPE html>') or clean_content.strip().startswith('<html')):
2250
- # Create search/replace blocks for image replacement based on images found in code
2251
- image_replacement_blocks = create_image_replacement_blocks(clean_content, query)
2252
- if image_replacement_blocks:
2253
- # Apply the image replacements using existing search/replace logic
2254
- clean_content = apply_search_replace_changes(clean_content, image_replacement_blocks)
2430
+ # Apply image generation (text→image and/or image→image)
2431
+ clean_content = apply_generated_images_to_html(
2432
+ clean_content,
2433
+ query,
2434
+ enable_text_to_image=enable_image_generation,
2435
+ enable_image_to_image=enable_image_to_image,
2436
+ input_image_data=image,
2437
+ image_to_image_prompt=image_to_image_prompt,
2438
+ text_to_image_prompt=text_to_image_prompt,
2439
+ )
2440
 
2441
  # Update history with the cleaned content
2442
  _history.append([query, clean_content])

@@ -2265,13 +2450,16 @@ This will help me create a better design for you."""
2450
  # Regular generation - use the content as is
2451
  final_content = remove_code_block(content)
2452
 

2268
- # Apply image generation if enabled and this is HTML content
2269
- if enable_image_generation and language == "html" and (final_content.strip().startswith('<!DOCTYPE html>') or final_content.strip().startswith('<html')):
2270
- # Create search/replace blocks for image replacement based on images found in code
2271
- image_replacement_blocks = create_image_replacement_blocks(final_content, query)
2272
- if image_replacement_blocks:
2273
- # Apply the image replacements using existing search/replace logic
2274
- final_content = apply_search_replace_changes(final_content, image_replacement_blocks)
+ # Apply image generation (text→image and/or image→image)
2454
+ final_content = apply_generated_images_to_html(
2455
+ final_content,
2456
+ query,
2457
+ enable_text_to_image=enable_image_generation,
2458
+ enable_image_to_image=enable_image_to_image,
2459
+ input_image_data=image,
2460
+ image_to_image_prompt=image_to_image_prompt,
2461
+ text_to_image_prompt=text_to_image_prompt,
2462
+ )
2463
 
2464
  _history.append([query, final_content])
2465
  yield {

@@ -3049,6 +3237,12 @@ with gr.Blocks(
3237
  label="UI design image",
3238
  visible=False
3239
  )
3240
+ image_to_image_prompt = gr.Textbox(
3241
+ label="Image-to-Image Prompt",
3242
+ placeholder="Describe how to transform the uploaded image (e.g., 'Turn the cat into a tiger.')",
3243
+ lines=2,
3244
+ visible=False
3245
+ )
3246
  with gr.Row():
3247
  btn = gr.Button("Generate", variant="primary", size="lg", scale=2, visible=True)
3248
  clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1, visible=True)

@@ -3080,13 +3274,43 @@ with gr.Blocks(
3274
  value=False,
3275
  visible=True
3276
  )
3083
- # Image generation toggle
3277
+ # Image generation toggles
3278
  image_generation_toggle = gr.Checkbox(
3085
- label="🎨 Generate Images",
3279
+ label="🎨 Generate Images (text → image)",
3280
  value=False,
3281
  visible=True,
3282
  info="Include generated images in your outputs using Qwen image model"
3283
  )
3284
+ text_to_image_prompt = gr.Textbox(
3285
+ label="Text-to-Image Prompt",
3286
+ placeholder="Describe the image to generate (e.g., 'A minimalist dashboard hero illustration in pastel colors.')",
3287
+ lines=2,
3288
+ visible=False
3289
+ )
3290
+ image_to_image_toggle = gr.Checkbox(
3291
+ label="🖼️ Image to Image (uses input image)",
3292
+ value=False,
3293
+ visible=True,
3294
+ info="Transform your uploaded image using FLUX.1-Kontext-dev"
3295
+ )
3296
+
3297
+ def on_image_to_image_toggle(toggled):
3298
+ # Show image input and its prompt when image-to-image is enabled
3299
+ return gr.update(visible=bool(toggled)), gr.update(visible=bool(toggled))
3300
+
3301
+ def on_text_to_image_toggle(toggled):
3302
+ return gr.update(visible=bool(toggled))
3303
+
3304
+ image_to_image_toggle.change(
3305
+ on_image_to_image_toggle,
3306
+ inputs=[image_to_image_toggle],
3307
+ outputs=[image_input, image_to_image_prompt]
3308
+ )
3309
+ image_generation_toggle.change(
3310
+ on_text_to_image_toggle,
3311
+ inputs=[image_generation_toggle],
3312
+ outputs=[text_to_image_prompt]
3313
+ )
3314
  model_dropdown = gr.Dropdown(
3315
  choices=[model['name'] for model in AVAILABLE_MODELS],
3316
  value="Qwen3-Coder-480B-A35B-Instruct",

@@ -3256,7 +3480,7 @@ with gr.Blocks(
3480
 
3481
  btn.click(
3482
  generation_code,
3259
- inputs=[input, image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle],
3483
+ inputs=[input, image_input, file_input, website_url_input, setting, history, current_model, search_toggle, language_dropdown, provider_state, image_generation_toggle, image_to_image_toggle, image_to_image_prompt, text_to_image_prompt],
3484
  outputs=[code_output, history, sandbox, history_output]
3485
  ).then(
3486
  show_deploy_components,
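
A minimal usage sketch for the new helpers, under stated assumptions: app.py is importable as a module, HF_TOKEN is set, Pillow is installed, and the test image name and placeholder page below are illustrative only, not part of the commit.

```python
# Minimal sketch: exercise the image-to-image helpers added in this commit.
# Assumptions: app.py is importable, HF_TOKEN is set, Pillow is installed,
# and "cat.png" is any local test image (hypothetical file name).
from PIL import Image

from app import apply_generated_images_to_html, generate_image_to_image

source = Image.open("cat.png")

# Returns an HTML <img> tag with base64 JPEG data, or an "Error ..." string.
img_tag = generate_image_to_image(source, "Turn the cat into a tiger.")
print(img_tag[:80])

# With an uploaded image and image-to-image enabled, at most one placeholder
# <img> in the generated HTML is replaced; text-to-image is skipped.
html = (
    "<!DOCTYPE html><html><body>"
    "<img src='https://via.placeholder.com/300' alt='hero'>"
    "</body></html>"
)
result = apply_generated_images_to_html(
    html,
    "A tiger hero image",
    enable_text_to_image=False,
    enable_image_to_image=True,
    input_image_data=source,
    image_to_image_prompt="Turn the cat into a tiger.",
)
print(result != html)
```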