akhaliq (HF Staff) committed
Commit 9140b33 · 1 Parent(s): fe5a100

gpt 5 streaming working

Files changed (1)
  app.py  +16  -107
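The change below drops the earlier non-streaming GPT-5 branch (which read completion.choices[0].message.content in one shot and post-processed it) and sends GPT-5 through the same streaming path as the other models; the only remaining request-level difference is the token-limit parameter, max_completion_tokens instead of max_tokens. A minimal sketch of that call shape, assuming the official openai Python client and a locally constructed client (app.py's actual client setup and prompt are not shown in this diff):

# Sketch only: mirrors the parameter split this commit settles on, not the full app.py wiring.
from openai import OpenAI

client = OpenAI()  # assumption: API key taken from the OPENAI_API_KEY environment variable

completion = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Generate a small HTML page"}],  # placeholder prompt
    stream=True,
    max_completion_tokens=16384,  # the GPT-5 path uses this in place of max_tokens
)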
app.py CHANGED
@@ -2057,114 +2057,23 @@ This will help me create a better design for you."""
                 messages=messages,
                 max_tokens=16384
             )
-        elif _current_model["id"] == "gpt-5":
-            # Special handling for GPT-5 model - no streaming due to organization verification requirement
-            completion = client.chat.completions.create(
-                model="gpt-5",
-                messages=messages,
-                max_completion_tokens=16384
-            )
-            # Handle non-streaming response
-            content = completion.choices[0].message.content
-            clean_code = remove_code_block(content)
-
-            # Apply image generation if enabled and this is HTML content
-            final_content = content
-            if enable_image_generation and language == "html" and (clean_code.strip().startswith('<!DOCTYPE html>') or clean_code.strip().startswith('<html')):
-                # Create search/replace blocks for image replacement based on images found in code
-                image_replacement_blocks = create_image_replacement_blocks(content, query)
-                if image_replacement_blocks:
-                    # Apply the image replacements using existing search/replace logic
-                    final_content = apply_search_replace_changes(content, image_replacement_blocks)
-
-            _history.append([query, final_content])
-
-            if language == "transformers.js":
-                files = parse_transformers_js_output(clean_code)
-                if files['index.html'] and files['index.js'] and files['style.css']:
-                    # Apply image generation if enabled
-                    if enable_image_generation:
-                        # Create search/replace blocks for image replacement based on images found in code
-                        image_replacement_blocks = create_image_replacement_blocks(files['index.html'], query)
-                        if image_replacement_blocks:
-                            # Apply the image replacements using existing search/replace logic
-                            files['index.html'] = apply_search_replace_changes(files['index.html'], image_replacement_blocks)
-
-                    formatted_output = format_transformers_js_output(files)
-                    yield {
-                        code_output: formatted_output,
-                        history: _history,
-                        sandbox: send_to_sandbox(files['index.html']),
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-                else:
-                    yield {
-                        code_output: clean_code,
-                        history: _history,
-                        sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Error parsing transformers.js output. Please try again.</div>",
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-            elif language == "svelte":
-                files = parse_svelte_output(clean_code)
-                if files['src/App.svelte'] and files['src/app.css']:
-                    formatted_output = format_svelte_output(files)
-                    yield {
-                        code_output: formatted_output,
-                        history: _history,
-                        sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-                else:
-                    yield {
-                        code_output: clean_code,
-                        history: _history,
-                        sandbox: "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your Svelte code using the download button above.</div>",
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-            else:
-                if has_existing_content and not (clean_code.strip().startswith("<!DOCTYPE html>") or clean_code.strip().startswith("<html")):
-                    last_content = _history[-1][1] if _history and len(_history[-1]) > 1 else ""
-                    modified_content = apply_search_replace_changes(last_content, clean_code)
-                    clean_content = remove_code_block(modified_content)
-
-                    # Apply image generation if enabled and this is HTML content
-                    if enable_image_generation and language == "html" and (clean_content.strip().startswith('<!DOCTYPE html>') or clean_content.strip().startswith('<html')):
-                        # Create search/replace blocks for image replacement based on images found in code
-                        image_replacement_blocks = create_image_replacement_blocks(clean_content, query)
-                        if image_replacement_blocks:
-                            # Apply the image replacements using existing search/replace logic
-                            clean_content = apply_search_replace_changes(clean_content, image_replacement_blocks)
-
-                    yield {
-                        code_output: clean_content,
-                        history: _history,
-                        sandbox: send_to_sandbox(clean_content) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-                else:
-                    # Apply image generation if enabled and this is HTML content
-                    final_content = clean_code
-                    if enable_image_generation and language == "html" and (final_content.strip().startswith('<!DOCTYPE html>') or final_content.strip().startswith('<html')):
-                        # Create search/replace blocks for image replacement based on images found in code
-                        image_replacement_blocks = create_image_replacement_blocks(final_content, query)
-                        if image_replacement_blocks:
-                            # Apply the image replacements using existing search/replace logic
-                            final_content = apply_search_replace_changes(final_content, image_replacement_blocks)
-
-                    yield {
-                        code_output: final_content,
-                        history: _history,
-                        sandbox: send_to_sandbox(final_content) if language == "html" else "<div style='padding:1em;color:#888;text-align:center;'>Preview is only available for HTML. Please download your code using the download button above.</div>",
-                        history_output: history_to_chatbot_messages(_history),
-                    }
-            return
+
         else:
-            completion = client.chat.completions.create(
-                model=_current_model["id"],
-                messages=messages,
-                stream=True,
-                max_tokens=16384
-            )
+            # Use max_completion_tokens for GPT-5, max_tokens for others
+            if _current_model["id"] == "gpt-5":
+                completion = client.chat.completions.create(
+                    model=_current_model["id"],
+                    messages=messages,
+                    stream=True,
+                    max_completion_tokens=16384
+                )
+            else:
+                completion = client.chat.completions.create(
+                    model=_current_model["id"],
+                    messages=messages,
+                    stream=True,
+                    max_tokens=16384
+                )
             content = ""
             for chunk in completion:
                 # Handle different response formats for Mistral vs others
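The hunk ends by handing the stream to the pre-existing chunk loop (content = "" / for chunk in completion: ...), whose Mistral-versus-OpenAI handling lies outside this diff. For orientation only, a hedged sketch of how an OpenAI-style stream is typically accumulated; the helper name is invented and this is not the exact loop in app.py:

def accumulate_stream(completion) -> str:
    """Collect streamed text from an OpenAI-style chat.completions stream (sketch)."""
    content = ""
    for chunk in completion:
        # Each chunk carries a delta; the final chunk usually has no text content.
        if chunk.choices and chunk.choices[0].delta.content:
            content += chunk.choices[0].delta.content
    return content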