ginipick committed
Commit 9470e9c · verified · 1 Parent(s): a8eb718

Update app.py

Files changed (1)
  app.py  +42 -58
app.py CHANGED
@@ -2,6 +2,8 @@
  import os
  import gradio as gr
  from gradio import ChatMessage
+ from typing import Iterator, List, Dict, Tuple
+ import google.generativeai as genai
  from huggingface_hub import HfApi
  import requests
  import re
@@ -9,9 +11,6 @@ import traceback
  import time
  import threading
  import json
- import asyncio
- from typing import List, Dict, Tuple, Iterator
- import google.generativeai as genai

  # HuggingFace-related API key (for Space analysis)
  HF_TOKEN = os.getenv("HF_TOKEN")
@@ -44,7 +43,6 @@ def get_file_content(space_id: str, file_path: str) -> str:
  def get_space_structure(space_id: str) -> Dict:
      try:
          files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
-
          tree = {"type": "directory", "path": "", "name": space_id, "children": []}
          for file in files:
              path_parts = file.split('/')
@@ -52,7 +50,7 @@ def get_space_structure(space_id: str) -> Dict:
          for i, part in enumerate(path_parts):
              if i == len(path_parts) - 1:  # file
                  current["children"].append({"type": "file", "path": file, "name": part})
-             else:  # directory
+             else:
                  found = False
                  for child in current["children"]:
                      if child["type"] == "directory" and child["name"] == part:
@@ -63,7 +61,6 @@ def get_space_structure(space_id: str) -> Dict:
                          new_dir = {"type": "directory", "path": '/'.join(path_parts[:i+1]), "name": part, "children": []}
                          current["children"].append(new_dir)
                          current = new_dir
-
          return tree
      except Exception as e:
          print(f"Error in get_space_structure: {str(e)}")
@@ -72,7 +69,6 @@ def get_space_structure(space_id: str) -> Dict:
  def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
      if "error" in tree_data:
          return tree_data["error"]
-
      formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
      if tree_data.get("type") == "directory":
          for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
@@ -80,47 +76,28 @@ def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
      return formatted

  def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
-     """
-     Dynamically adjusts the number of lines based on the code content.
-
-     Parameters:
-     - code_content (str): the code text content
-     - min_lines (int): minimum number of lines
-     - max_lines (int): maximum number of lines
-
-     Returns:
-     - int: the number of lines to use
-     """
      num_lines = len(code_content.split('\n'))
      return min(max(num_lines, min_lines), max_lines)

  def analyze_space(url: str, progress=gr.Progress()):
      try:
          space_id = url.split('spaces/')[-1]
-
          if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
              raise ValueError(f"Invalid Space ID format: {space_id}")
-
          progress(0.1, desc="파일 ꡬ쑰 뢄석 쀑...")
          tree_structure = get_space_structure(space_id)
          if "error" in tree_structure:
              raise ValueError(tree_structure["error"])
          tree_view = format_tree_structure(tree_structure)
-
          progress(0.3, desc="app.py λ‚΄μš© κ°€μ Έμ˜€λŠ” 쀑...")
          app_content = get_file_content(space_id, "app.py")
-
          progress(0.5, desc="μ½”λ“œ μš”μ•½ 쀑...")
          summary = summarize_code(app_content)
-
          progress(0.7, desc="μ½”λ“œ 뢄석 쀑...")
          analysis = analyze_code(app_content)
-
          progress(0.9, desc="μ‚¬μš©λ²• μ„€λͺ… 생성 쀑...")
          usage = explain_usage(app_content)
-
          app_py_lines = adjust_lines_for_code(app_content)
-
          progress(1.0, desc="μ™„λ£Œ")
          return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, app_py_lines
      except Exception as e:
@@ -131,13 +108,10 @@ def analyze_space(url: str, progress=gr.Progress()):
  # --------------------------------------------------
  # Helper functions for the Gemini 2.0 Flash Thinking model
  # --------------------------------------------------
- def format_chat_history(messages: list) -> list:
-     """
-     Converts a list of Gradio ChatMessage objects into a format Gemini can understand.
-     """
+ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
      formatted_history = []
      for message in messages:
-         # Skip thinking messages (messages that have metadata).
+         # Skip thinking messages (messages with metadata).
          if not (hasattr(message, "metadata") and message.metadata):
              formatted_history.append({
                  "role": "user" if message.role == "user" else "assistant",
@@ -146,10 +120,6 @@ def format_chat_history(messages: list) -> list:
      return formatted_history

  def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
-     """
-     Sends the system and user messages and accumulates the streaming response into the final text.
-     """
-     # Build the initial conversation history (simple string-based)
      initial_messages = [
          ChatMessage(role="system", content=system_message),
          ChatMessage(role="user", content=user_message)
@@ -160,7 +130,6 @@ def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
      try:
          for chunk in chat.send_message(user_message, stream=True):
              parts = chunk.candidates[0].content.parts
-             # If the chunk separates the thinking part from the final answer, use the final answer
              if len(parts) == 2:
                  final_response += parts[1].text
              else:
@@ -169,7 +138,7 @@ def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
      except Exception as e:
          return f"LLM 호좜 쀑 였λ₯˜ λ°œμƒ: {str(e)}"

- def summarize_code(app_content: str):
+ def summarize_code(app_content: str) -> str:
      system_message = "당신은 Python μ½”λ“œλ₯Ό λΆ„μ„ν•˜κ³  μš”μ•½ν•˜λŠ” AI μ‘°μˆ˜μž…λ‹ˆλ‹€. μ£Όμ–΄μ§„ μ½”λ“œλ₯Ό 3쀄 μ΄λ‚΄λ‘œ κ°„κ²°ν•˜κ²Œ μš”μ•½ν•΄μ£Όμ„Έμš”."
      user_message = f"λ‹€μŒ Python μ½”λ“œλ₯Ό 3쀄 μ΄λ‚΄λ‘œ μš”μ•½ν•΄μ£Όμ„Έμš”:\n\n{app_content}"
      try:
@@ -177,7 +146,7 @@ def summarize_code(app_content: str):
      except Exception as e:
          return f"μš”μ•½ 생성 쀑 였λ₯˜ λ°œμƒ: {str(e)}"

- def analyze_code(app_content: str):
+ def analyze_code(app_content: str) -> str:
      system_message = (
          "당신은 Python μ½”λ“œλ₯Ό λΆ„μ„ν•˜λŠ” AI μ‘°μˆ˜μž…λ‹ˆλ‹€. μ£Όμ–΄μ§„ μ½”λ“œλ₯Ό λΆ„μ„ν•˜μ—¬ λ‹€μŒ ν•­λͺ©μ— λŒ€ν•΄ μ„€λͺ…ν•΄μ£Όμ„Έμš”:\n"
          "A. λ°°κ²½ 및 ν•„μš”μ„±\n"
@@ -193,7 +162,7 @@ def analyze_code(app_content: str):
      except Exception as e:
          return f"뢄석 생성 쀑 였λ₯˜ λ°œμƒ: {str(e)}"

- def explain_usage(app_content: str):
+ def explain_usage(app_content: str) -> str:
      system_message = "당신은 Python μ½”λ“œλ₯Ό λΆ„μ„ν•˜μ—¬ μ‚¬μš©λ²•μ„ μ„€λͺ…ν•˜λŠ” AI μ‘°μˆ˜μž…λ‹ˆλ‹€. μ£Όμ–΄μ§„ μ½”λ“œλ₯Ό λ°”νƒ•μœΌλ‘œ 마치 화면을 λ³΄λŠ” κ²ƒμ²˜λŸΌ μ‚¬μš©λ²•μ„ μƒμ„Ένžˆ μ„€λͺ…ν•΄μ£Όμ„Έμš”. Markdown ν˜•μ‹μœΌλ‘œ 좜λ ₯ν•˜μ„Έμš”."
      user_message = f"λ‹€μŒ Python μ½”λ“œμ˜ μ‚¬μš©λ²•μ„ μ„€λͺ…ν•΄μ£Όμ„Έμš”:\n\n{app_content}"
      try:
@@ -201,11 +170,28 @@ def explain_usage(app_content: str):
      except Exception as e:
          return f"μ‚¬μš©λ²• μ„€λͺ… 생성 쀑 였λ₯˜ λ°œμƒ: {str(e)}"

- def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
+ def convert_chat_history(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
      """
-     Provides a streaming response using the Gemini 2.0 Flash Thinking model.
-     This function uses Gradio ChatMessage objects and shows the 'thinking' stage and the final answer in real time.
+     Converts a list of ChatMessage objects into a list of (user, assistant) tuples.
      """
+     conv = []
+     i = 0
+     while i < len(messages):
+         if messages[i].role == "user":
+             user_text = messages[i].content
+             bot_text = ""
+             if i + 1 < len(messages) and messages[i+1].role == "assistant":
+                 bot_text = messages[i+1].content
+                 i += 2
+             else:
+                 i += 1
+             conv.append((user_text, bot_text))
+         else:
+             conv.append(("", messages[i].content))
+             i += 1
+     return conv
+
+ def stream_gemini_response(user_message: str, messages: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
      if not user_message.strip():
          messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed."))
          yield messages
@@ -214,17 +200,13 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
      try:
          print(f"\n=== New Request (Text) ===")
          print(f"User message: {user_message}")
-
-         # Build the formatted conversation history
          chat_history = format_chat_history(messages)
          chat = model.start_chat(history=chat_history)
          response = chat.send_message(user_message, stream=True)
-
          thought_buffer = ""
          response_buffer = ""
          thinking_complete = False

-         # Add the initial 'thinking' message
          messages.append(
              ChatMessage(
                  role="assistant",
@@ -240,7 +222,6 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
              if len(parts) == 2 and not thinking_complete:
                  thought_buffer += current_chunk
                  print(f"\n=== Complete Thought ===\n{thought_buffer}")
-
                  messages[-1] = ChatMessage(
                      role="assistant",
                      content=thought_buffer,
@@ -250,7 +231,6 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:

                  response_buffer = parts[1].text
                  print(f"\n=== Starting Response ===\n{response_buffer}")
-
                  messages.append(
                      ChatMessage(
                          role="assistant",
@@ -262,7 +242,6 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
              elif thinking_complete:
                  response_buffer += current_chunk
                  print(f"\n=== Response Chunk ===\n{current_chunk}")
-
                  messages[-1] = ChatMessage(
                      role="assistant",
                      content=response_buffer
@@ -271,7 +250,6 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
              else:
                  thought_buffer += current_chunk
                  print(f"\n=== Thinking Chunk ===\n{current_chunk}")
-
                  messages[-1] = ChatMessage(
                      role="assistant",
                      content=thought_buffer,
@@ -291,11 +269,16 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
          )
          yield messages

- def respond(message: str, history: list) -> Iterator[list]:
+ def respond(message: str, history: List[ChatMessage]) -> Iterator[List[Tuple[str, str]]]:
      """
-     The existing respond() calls stream_gemini_response() to stream responses in chat form.
+     Calls stream_gemini_response() and converts each yielded result into a list of tuples.
      """
-     return stream_gemini_response(message, history)
+     for updated_messages in stream_gemini_response(message, history):
+         yield convert_chat_history(updated_messages)
+
+ def user_message(msg: str, history: List[ChatMessage]) -> Tuple[str, List[ChatMessage]]:
+     history.append(ChatMessage(role="user", content=msg))
+     return "", history

  # --------------------------------------------------
  # Gradio UI configuration
@@ -421,9 +404,11 @@ def create_ui():
          )

          with gr.TabItem("AI μ½”λ”©"):
+             # Set the chat box height to 400px so it fits the screen height.
              chatbot = gr.Chatbot(
                  label="λŒ€ν™”",
-                 elem_classes="output-group full-height"
+                 elem_classes="output-group",
+                 height=400
              )

              msg = gr.Textbox(label="λ©”μ‹œμ§€", placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”...")
@@ -444,8 +429,9 @@ def create_ui():
              gr.Examples(examples, inputs=msg)

              def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
-                 # stream_gemini_response returns a generator.
-                 return "", stream_gemini_response(message, chat_history)
+                 # At each step of the generator, convert the chat history to a list of tuples.
+                 for updated in stream_gemini_response(message, chat_history):
+                     yield "", convert_chat_history(updated)

              msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])
@@ -484,11 +470,9 @@ if __name__ == "__main__":
      print("Starting HuggingFace Space Analyzer...")
      demo = create_ui()
      print("UI created successfully.")
-
      print("Configuring Gradio queue...")
      demo.queue()
      print("Gradio queue configured.")
-
      print("Launching Gradio app...")
      demo.launch(
          server_name="0.0.0.0",
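
The companion fix is in respond_wrapper(): Gradio only streams when the event handler is itself a generator, so the old code, which returned ("", <generator>), handed the Chatbot a raw generator object instead of iterating it. The rewritten wrapper yields a ("", tuples) pair at every step, which keeps the textbox cleared while the chat updates in place. A minimal sketch of the same pattern, assuming a tuple-format Chatbot and substituting an illustrative fake token stream for the Gemini call:

import time
import gradio as gr

def stream_reply(message, history):
    # The handler is a generator: each yield pushes (textbox_value, chatbot_value) to the UI.
    history = history or []
    history.append((message, ""))       # show the user turn immediately
    reply = ""
    for token in ["Streaming", " a", " reply", "..."]:
        time.sleep(0.2)                 # stand-in for the streaming LLM call
        reply += token
        history[-1] = (message, reply)  # update the last (user, assistant) tuple
        yield "", history               # "" keeps the textbox cleared while streaming

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=400)    # tuple-format history, as in this commit
    msg = gr.Textbox()
    msg.submit(stream_reply, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.queue()
    demo.launch()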