ginipick committed
Commit 4e8ea60 · verified · 1 Parent(s): 9696775

Update app.py

Files changed (1): app.py +115 -70
app.py CHANGED
@@ -17,11 +17,13 @@ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
 def get_headers():
     if not HF_TOKEN:
         raise ValueError("Hugging Face token not found in environment variables")
     return {"Authorization": f"Bearer {HF_TOKEN}"}
 
 def get_file_content(space_id: str, file_path: str) -> str:
     file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
     try:
@@ -33,6 +35,7 @@ def get_file_content(space_id: str, file_path: str) -> str:
     except requests.RequestException:
         return f"Error fetching content for file: {file_path}"
 
 def get_space_structure(space_id: str) -> Dict:
     try:
         files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
@@ -59,55 +62,78 @@ def get_space_structure(space_id: str) -> Dict:
         print(f"Error in get_space_structure: {str(e)}")
         return {"error": f"API request error: {str(e)}"}
 
 def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
     if "error" in tree_data:
         return tree_data["error"]
     formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
     if tree_data.get("type") == "directory":
         for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
             formatted += format_tree_structure(child, indent + "  ")
     return formatted
 
 def analyze_space(url: str, progress=gr.Progress()):
     try:
         space_id = url.split('spaces/')[-1]
         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
             raise ValueError(f"Invalid Space ID format: {space_id}")
         progress(0.1, desc="Analyzing file structure...")
         tree_structure = get_space_structure(space_id)
         if "error" in tree_structure:
             raise ValueError(tree_structure["error"])
         tree_view = format_tree_structure(tree_structure)
         progress(0.3, desc="Fetching app.py...")
         app_content = get_file_content(space_id, "app.py")
         progress(0.5, desc="Summarizing code...")
         summary = summarize_code(app_content)
         progress(0.7, desc="Analyzing code...")
         analysis = analyze_code(app_content)
         progress(0.9, desc="Generating usage explanation...")
         usage = explain_usage(app_content)
         # Adjust the number of display lines
         lines_for_app_py = adjust_lines_for_code(app_content)
         progress(1.0, desc="Done")
         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
     except Exception as e:
         print(f"Error in analyze_space: {str(e)}")
         print(traceback.format_exc())
         return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
 def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
     num_lines = len(code_content.split('\n'))
     return min(max(num_lines, min_lines), max_lines)
 
-# -----------------------------
-# LLM (Gemini) functions
-# -----------------------------
 from gradio import ChatMessage
 
 def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
     """
     Convert a list of ChatMessage objects into a format the Gemini model understands
-    (messages carrying Thinking metadata are excluded)
     """
     formatted = []
     for m in messages:
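
For reference, the sort key in format_tree_structure lists directories before files because False sorts before True, then alphabetically by name within each group. A small standalone check (illustrative only; the sample data is hypothetical, only the sort key is taken from the commit):

    items = [
        {"type": "file", "name": "app.py"},
        {"type": "directory", "name": "assets"},
        {"type": "file", "name": "README.md"},
        {"type": "directory", "name": "data"},
    ]
    # Directories first (False < True), then by name within each group
    ordered = sorted(items, key=lambda x: (x.get("type", "") != "directory", x.get("name", "")))
    print([i["name"] for i in ordered])  # ['assets', 'data', 'README.md', 'app.py']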
@@ -118,15 +144,12 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
         formatted.append({"role": role, "parts": [m.content or ""]})
     return formatted
 
-import google.generativeai as genai
 
-def gemini_chat_completion(
-    system_message: str,
-    user_message: str,
-    max_tokens: int = 200,
-    temperature: float = 0.7
-) -> str:
-    # Convert to ChatMessage objects
     init_msgs = [
         ChatMessage(role="system", content=system_message),
         ChatMessage(role="user", content=user_message)
@@ -138,15 +161,15 @@ def gemini_chat_completion(
         for chunk in chat.send_message(user_message, stream=True):
             parts = chunk.candidates[0].content.parts
             if len(parts) == 2:
-                # Chunk arrived as Thinking + final answer
                 final += parts[1].text
             else:
-                # Plain answer
                 final += parts[0].text
         return final.strip()
     except Exception as e:
         return f"Error during LLM call: {str(e)}"
 
 def summarize_code(app_content: str):
     system_msg = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely in no more than three lines."
     user_msg = f"Summarize the following Python code in no more than three lines:\n\n{app_content}"
@@ -155,6 +178,7 @@ def summarize_code(app_content: str):
     except Exception as e:
         return f"Error while generating summary: {str(e)}"
 
 def analyze_code(app_content: str):
     # Add 'deep thinking' guidance to the system prompt
     system_msg = (
@@ -175,6 +199,7 @@ def analyze_code(app_content: str):
     except Exception as e:
         return f"Error while generating analysis: {str(e)}"
 
 def explain_usage(app_content: str):
     # Add 'deep thinking' guidance to the system prompt
     system_msg = (
@@ -189,21 +214,17 @@ def explain_usage(app_content: str):
     except Exception as e:
         return f"Error while generating usage explanation: {str(e)}"
 
-# -----------------------------
-# Live conversation streaming
-# -----------------------------
 def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     """
     conversation_state: the conversation history, a list of ChatMessage objects only (Gradio State).
     """
-    if not user_message.strip():
-        conversation_state.append(
-            ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed.")
-        )
-        yield conversation_state
-        return
 
-    print(f"\n=== New Request ===\nUser message: {user_message}")
 
     # Convert the existing conversation into Gemini format
     chat_history = format_chat_history(conversation_state)
@@ -223,49 +244,57 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
         )
     )
 
-    for chunk in response:
-        parts = chunk.candidates[0].content.parts
-        current_chunk = parts[0].text
-
-        if len(parts) == 2 and not thinking_complete:
-            # The first part is the 'thought'; the second is the final answer
-            thought_buffer += current_chunk
-            print(f"\n=== Complete Thought ===\n{thought_buffer}")
-            conversation_state[-1] = ChatMessage(
-                role="assistant",
-                content=thought_buffer,
-                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
-            )
             yield conversation_state
 
-            response_buffer = parts[1].text
-            print(f"\n=== Starting Response ===\n{response_buffer}")
-            conversation_state.append(
-                ChatMessage(role="assistant", content=response_buffer)
-            )
-            thinking_complete = True
-
-        elif thinking_complete:
-            # Thinking is already done, so accumulate the final answer
-            response_buffer += current_chunk
-            print(f"\n=== Response Chunk ===\n{current_chunk}")
-            conversation_state[-1] = ChatMessage(
-                role="assistant",
-                content=response_buffer
-            )
 
-        else:
-            # Still thinking
-            thought_buffer += current_chunk
-            print(f"\n=== Thinking Chunk ===\n{current_chunk}")
-            conversation_state[-1] = ChatMessage(
                 role="assistant",
-                content=thought_buffer,
-                metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
             )
         yield conversation_state
 
-    print(f"\n=== Final Response ===\n{response_buffer}")
 
 def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
     """
@@ -289,28 +318,31 @@ def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
         i += 1
     return result
 
 def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
     """
     Called when the user submits a message.
     Appends the user message to the ChatMessage list (conversation_state), then returns.
     """
     conversation_state.append(ChatMessage(role="user", content=msg))
     return "", conversation_state
 
 def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
     """
     Takes the user message, makes a (streaming) request to Gemini, updates the
     conversation history, and returns (user, assistant) tuples for display.
     """
-    # Streaming response
     for updated_messages in stream_gemini_response(message, conversation_state):
         # Convert to (user, assistant) tuples for display
         yield "", convert_to_display_tuples(updated_messages)
 
-# --------------------------------------------------
-# Gradio UI
-# --------------------------------------------------
 def create_ui():
     try:
         css = """
         footer {visibility: hidden;}
@@ -391,17 +423,17 @@ def create_ui():
         conversation_state = gr.State([])
 
         # Event chain
-        # 1) User message submitted -> user_submit_message -> (clear the input box, append the ChatMessage)
-        # 2) Call respond_wrapper -> Gemini streaming -> update the ChatMessages -> convert to tuples and show in chatbot
         msg.submit(
             user_submit_message,
             inputs=[msg, conversation_state],
-            outputs=[msg, conversation_state],
             queue=False
         ).then(
             respond_wrapper,
             inputs=[msg, conversation_state, max_tokens, temperature, top_p],
-            outputs=[msg, chatbot],  # msg is cleared; chatbot receives the list of (user, assistant) tuples
         )
 
         with gr.TabItem("Recommended Best"):
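
The msg.submit(...).then(...) chain in this hunk is the standard Gradio pattern for "record the user turn, then stream the reply". A minimal self-contained sketch of that pattern, assuming current Gradio Blocks semantics and the tuple-style Chatbot used in this commit; the function names and echo logic are illustrative, not from this commit:

    import time
    import gradio as gr

    def add_user_message(message, history):
        # Step 1: clear the textbox and append the user turn to the state
        return "", history + [(message, "")]

    def echo_stream(history):
        # Step 2: stream the assistant reply into the last (user, assistant) tuple
        user_msg, _ = history[-1]
        partial = ""
        for ch in f"Echo: {user_msg}":
            partial += ch
            history[-1] = (user_msg, partial)
            time.sleep(0.01)
            yield history  # each yield re-renders the chatbot

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        state = gr.State([])
        msg.submit(add_user_message, [msg, state], [msg, state], queue=False).then(
            echo_stream, [state], [chatbot]
        )

    demo.queue().launch()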
@@ -432,7 +464,7 @@ def create_ui():
             inputs=[space_id_state],
             outputs=[requirements_content]
         ).then(
-            lambda lines: gr.update(lines=lines),
             inputs=[app_py_content_lines],
             outputs=[app_py_content]
         )
@@ -444,13 +476,26 @@ def create_ui():
         print(traceback.format_exc())
         raise
 
 if __name__ == "__main__":
     try:
         print("Starting HuggingFace Space Analyzer...")
         demo = create_ui()
         demo.queue()
-        demo.launch(server_name="0.0.0.0", server_port=7860, share=False, debug=True, show_api=False)
     except Exception as e:
         print(f"Error in main: {str(e)}")
         print(traceback.format_exc())
         raise
 
app.py (after)
 genai.configure(api_key=GEMINI_API_KEY)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
+
 def get_headers():
     if not HF_TOKEN:
         raise ValueError("Hugging Face token not found in environment variables")
     return {"Authorization": f"Bearer {HF_TOKEN}"}
 
+
 def get_file_content(space_id: str, file_path: str) -> str:
     file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
     try:
 
     except requests.RequestException:
         return f"Error fetching content for file: {file_path}"
 
+
 def get_space_structure(space_id: str) -> Dict:
     try:
         files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
 
         print(f"Error in get_space_structure: {str(e)}")
         return {"error": f"API request error: {str(e)}"}
 
+
 def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
     if "error" in tree_data:
         return tree_data["error"]
     formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
     if tree_data.get("type") == "directory":
+        # Sort key lists directories first, then files
         for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
             formatted += format_tree_structure(child, indent + "  ")
     return formatted
 
+
 def analyze_space(url: str, progress=gr.Progress()):
+    """
+    Loads a HuggingFace Space's app.py and file structure, then returns:
+    1) a code summary
+    2) a code analysis
+    3) usage instructions
+    and related outputs.
+    """
     try:
         space_id = url.split('spaces/')[-1]
         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
             raise ValueError(f"Invalid Space ID format: {space_id}")
+
         progress(0.1, desc="Analyzing file structure...")
         tree_structure = get_space_structure(space_id)
         if "error" in tree_structure:
             raise ValueError(tree_structure["error"])
         tree_view = format_tree_structure(tree_structure)
+
         progress(0.3, desc="Fetching app.py...")
         app_content = get_file_content(space_id, "app.py")
+
         progress(0.5, desc="Summarizing code...")
         summary = summarize_code(app_content)
+
         progress(0.7, desc="Analyzing code...")
         analysis = analyze_code(app_content)
+
         progress(0.9, desc="Generating usage explanation...")
         usage = explain_usage(app_content)
+
         # Adjust the number of display lines
         lines_for_app_py = adjust_lines_for_code(app_content)
+
         progress(1.0, desc="Done")
         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
+
     except Exception as e:
         print(f"Error in analyze_space: {str(e)}")
         print(traceback.format_exc())
         return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
+
 def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
+    """
+    Dynamically adjusts the number of display lines to match the code's line count.
+    """
     num_lines = len(code_content.split('\n'))
     return min(max(num_lines, min_lines), max_lines)
 
+
+# --------------------------------------------------
+# Gemini 2.0 Flash Thinking model (LLM) functions
+# --------------------------------------------------
 from gradio import ChatMessage
 
 def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
     """
     Convert a list of ChatMessage objects into a format the Gemini model understands
+    (messages carrying Thinking metadata are ignored)
     """
     formatted = []
     for m in messages:
 
         formatted.append({"role": role, "parts": [m.content or ""]})
     return formatted
 
 
+def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
+    """
+    Takes a system message and a user message, sends a streaming request to Gemini,
+    and returns the final response text.
+    """
     init_msgs = [
         ChatMessage(role="system", content=system_message),
         ChatMessage(role="user", content=user_message)
 
         for chunk in chat.send_message(user_message, stream=True):
             parts = chunk.candidates[0].content.parts
             if len(parts) == 2:
+                # Thinking + final answer
                 final += parts[1].text
             else:
                 final += parts[0].text
         return final.strip()
     except Exception as e:
         return f"Error during LLM call: {str(e)}"
 
+
 def summarize_code(app_content: str):
     system_msg = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely in no more than three lines."
     user_msg = f"Summarize the following Python code in no more than three lines:\n\n{app_content}"
 
     except Exception as e:
         return f"Error while generating summary: {str(e)}"
 
+
 def analyze_code(app_content: str):
     # Add 'deep thinking' guidance to the system prompt
     system_msg = (
 
     except Exception as e:
         return f"Error while generating analysis: {str(e)}"
 
+
 def explain_usage(app_content: str):
     # Add 'deep thinking' guidance to the system prompt
     system_msg = (
 
     except Exception as e:
         return f"Error while generating usage explanation: {str(e)}"
 
+
 
 def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     """
     conversation_state: the conversation history, a list of ChatMessage objects only (Gradio State).
+    (Changed) Empty strings are now handled as well; no error is raised.
     """
+    # This previously did `if not user_message.strip(): ... return`, but the
+    # "Please provide a non-empty text message..." error was reported as annoying, so it was removed/relaxed.
+    # If needed, add handling here for the case where user_message is truly empty.
 
+    print(f"\n=== New Request ===\nUser message: {user_message if user_message.strip() else '(Empty)'}")
 
     # Convert the existing conversation into Gemini format
     chat_history = format_chat_history(conversation_state)
 
         )
     )
 
+    try:
+        for chunk in response:
+            parts = chunk.candidates[0].content.parts
+            current_chunk = parts[0].text
+
+            if len(parts) == 2 and not thinking_complete:
+                thought_buffer += current_chunk
+                print(f"\n=== Complete Thought ===\n{thought_buffer}")
+                conversation_state[-1] = ChatMessage(
+                    role="assistant",
+                    content=thought_buffer,
+                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                )
+                yield conversation_state
+
+                response_buffer = parts[1].text
+                print(f"\n=== Starting Response ===\n{response_buffer}")
+                conversation_state.append(
+                    ChatMessage(role="assistant", content=response_buffer)
+                )
+                thinking_complete = True
+
+            elif thinking_complete:
+                response_buffer += current_chunk
+                print(f"\n=== Response Chunk ===\n{current_chunk}")
+                conversation_state[-1] = ChatMessage(
+                    role="assistant",
+                    content=response_buffer
+                )
+            else:
+                thought_buffer += current_chunk
+                print(f"\n=== Thinking Chunk ===\n{current_chunk}")
+                conversation_state[-1] = ChatMessage(
+                    role="assistant",
+                    content=thought_buffer,
+                    metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                )
             yield conversation_state
 
+        print(f"\n=== Final Response ===\n{response_buffer}")
 
+    except Exception as e:
+        print(f"\n=== Error ===\n{str(e)}")
+        conversation_state.append(
+            ChatMessage(
                 role="assistant",
+                content=f"I apologize, but I encountered an error: {str(e)}"
             )
+        )
         yield conversation_state
 
 
 def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
     """
 
         i += 1
     return result
 
+
 def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
     """
     Called when the user submits a message.
     Appends the user message to the ChatMessage list (conversation_state), then returns.
     """
     conversation_state.append(ChatMessage(role="user", content=msg))
+    # Clear the input box
     return "", conversation_state
 
+
 def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
     """
     Takes the user message, makes a (streaming) request to Gemini, updates the
     conversation history, and returns (user, assistant) tuples for display.
     """
     for updated_messages in stream_gemini_response(message, conversation_state):
         # Convert to (user, assistant) tuples for display
         yield "", convert_to_display_tuples(updated_messages)
 
+
 def create_ui():
+    """
+    Builds the Gradio UI.
+    """
     try:
         css = """
         footer {visibility: hidden;}
 
         conversation_state = gr.State([])
 
         # Event chain
+        # 1) User message -> user_submit_message -> (clear the input box + append to state)
+        # 2) respond_wrapper -> Gemini streaming -> update conversation state -> convert to (user, assistant) tuples
         msg.submit(
             user_submit_message,
             inputs=[msg, conversation_state],
+            outputs=[msg, conversation_state],
             queue=False
         ).then(
             respond_wrapper,
             inputs=[msg, conversation_state, max_tokens, temperature, top_p],
+            outputs=[msg, chatbot],
         )
 
         with gr.TabItem("Recommended Best"):
 
             inputs=[space_id_state],
             outputs=[requirements_content]
         ).then(
+            lambda lines: gr.update(lines=lines),
             inputs=[app_py_content_lines],
             outputs=[app_py_content]
         )
 
         print(traceback.format_exc())
         raise
 
+
 if __name__ == "__main__":
     try:
         print("Starting HuggingFace Space Analyzer...")
         demo = create_ui()
+        print("UI created successfully.")
+        print("Configuring Gradio queue...")
         demo.queue()
+        print("Gradio queue configured.")
+        print("Launching Gradio app...")
+        demo.launch(
+            server_name="0.0.0.0",
+            server_port=7860,
+            share=False,
+            debug=True,
+            show_api=False
+        )
+        print("Gradio app launched successfully.")
     except Exception as e:
         print(f"Error in main: {str(e)}")
+        print("Detailed error information:")
         print(traceback.format_exc())
         raise
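
The refactored stream_gemini_response boils down to one pattern: with the flash-thinking model, a streamed chunk whose content has two parts delivers the end of the thought plus the start of the answer; before that point, single-part chunks extend the thought, and afterwards they extend the answer. A condensed sketch of that pattern outside Gradio, assuming the same google-generativeai streaming behavior this commit relies on (the model is experimental, so the two-part chunk shape is an observed convention, not a documented contract):

    import os
    import google.generativeai as genai

    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
    model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")

    def stream_thought_and_answer(prompt: str) -> tuple:
        chat = model.start_chat(history=[])
        thought, answer = "", ""
        answering = False
        for chunk in chat.send_message(prompt, stream=True):
            parts = chunk.candidates[0].content.parts
            if len(parts) == 2 and not answering:
                thought += parts[0].text   # closing fragment of the thought
                answer += parts[1].text    # first fragment of the answer
                answering = True
            elif answering:
                answer += parts[0].text    # the answer keeps streaming
            else:
                thought += parts[0].text   # still thinking
        return thought, answer.strip()

    thought, answer = stream_thought_and_answer("Why is the sky blue?")
    print(answer)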