ginipick committed on
Commit
eac18b7
·
verified ·
1 Parent(s): 10e6915

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -38
app.py CHANGED
@@ -68,7 +68,7 @@ def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
68
  return tree_data["error"]
69
  formatted = f"{indent}{'πŸ“' if tree_data.get('type') == 'directory' else 'πŸ“„'} {tree_data.get('name', 'Unknown')}\n"
70
  if tree_data.get("type") == "directory":
71
- # 디렉토리λ₯Ό λ¨Όμ €, νŒŒμΌμ„ λ‚˜μ€‘μ— ν‘œμ‹œν•˜κΈ° μœ„ν•΄ μ •λ ¬ 쑰건 μ‚¬μš©
72
  for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
73
  formatted += format_tree_structure(child, indent + " ")
74
  return formatted
@@ -105,7 +105,6 @@ def analyze_space(url: str, progress=gr.Progress()):
105
  progress(0.9, desc="μ‚¬μš©λ²• μ„€λͺ… 생성 쀑...")
106
  usage = explain_usage(app_content)
107
 
108
- # lines 수 μ‘°μ •
109
  lines_for_app_py = adjust_lines_for_code(app_content)
110
 
111
  progress(1.0, desc="μ™„λ£Œ")
@@ -124,31 +123,30 @@ def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int
124
  num_lines = len(code_content.split('\n'))
125
  return min(max(num_lines, min_lines), max_lines)
126
 
127
-
128
  # --------------------------------------------------
129
- # Gemini 2.0 Flash Thinking λͺ¨λΈ κ΄€λ ¨ (LLM) ν•¨μˆ˜λ“€
130
  # --------------------------------------------------
131
  from gradio import ChatMessage
132
 
133
  def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
134
  """
135
  ChatMessage λͺ©λ‘μ„ Gemini λͺ¨λΈμ΄ 이해할 수 μžˆλŠ” ν˜•μ‹μœΌλ‘œ λ³€ν™˜
136
- (Thinking 메타데이터 포함 λ©”μ‹œμ§€λŠ” λ¬΄μ‹œ)
137
  """
138
  formatted = []
139
  for m in messages:
140
- # 'Thinking' metadataκ°€ 있으면 λ¬΄μ‹œ
141
- if hasattr(m, "metadata") and m.metadata:
142
  continue
143
  role = "assistant" if m.role == "assistant" else "user"
144
  formatted.append({"role": role, "parts": [m.content or ""]})
145
  return formatted
146
 
147
 
 
 
148
  def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
149
  """
150
- μ‹œμŠ€ν…œ λ©”μ‹œμ§€μ™€ μœ μ € λ©”μ‹œμ§€λ₯Ό λ°›μ•„ Gemini에 슀트리밍 μš”μ²­,
151
- μ΅œμ’… 응닡 ν…μŠ€νŠΈλ₯Ό λ°˜ν™˜ν•©λ‹ˆλ‹€.
152
  """
153
  init_msgs = [
154
  ChatMessage(role="system", content=system_message),
@@ -161,7 +159,6 @@ def gemini_chat_completion(system_message: str, user_message: str, max_tokens: i
161
  for chunk in chat.send_message(user_message, stream=True):
162
  parts = chunk.candidates[0].content.parts
163
  if len(parts) == 2:
164
- # Thinking + μ΅œμ’…μ‘λ‹΅
165
  final += parts[1].text
166
  else:
167
  final += parts[0].text
@@ -180,7 +177,6 @@ def summarize_code(app_content: str):
180
 
181
 
182
  def analyze_code(app_content: str):
183
- # μ‹œμŠ€ν…œ ν”„λ‘¬ν”„νŠΈμ— 'λ”₯μ”½ν‚Ή' μ•ˆλ‚΄λ¬Έ μΆ”κ°€
184
  system_msg = (
185
  "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
186
  "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
@@ -201,7 +197,6 @@ def analyze_code(app_content: str):
201
 
202
 
203
  def explain_usage(app_content: str):
204
- # μ‹œμŠ€ν…œ ν”„λ‘¬ν”„νŠΈμ— 'λ”₯μ”½ν‚Ή' μ•ˆλ‚΄λ¬Έ μΆ”κ°€
205
  system_msg = (
206
  "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
207
  "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
@@ -217,25 +212,30 @@ def explain_usage(app_content: str):
217
 
218
  def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
219
  """
220
- conversation_state: ChatMessage 객체둜만 이루어진 'λŒ€ν™” 이λ ₯' (Gradio State).
221
- (μˆ˜μ •) 빈 λ¬Έμžμ—΄μ΄μ–΄λ„ μ²˜λ¦¬ν•˜λ„λ‘ λ³€κ²½. μ—λŸ¬λ₯Ό λ„μš°μ§€ μ•ŠμŒ.
222
  """
223
- # κΈ°μ‘΄μ—λŠ” if not user_message.strip(): ... return ν–ˆμœΌλ‚˜,
224
- # "Please provide a non-empty text message..." 였λ₯˜κ°€ λΆˆνŽΈν•˜λ‹€λŠ” μš”μ²­μœΌλ‘œ 제거/완화함.
225
- # ν•„μš”ν•˜λ‹€λ©΄ user_messageκ°€ 정말 아무것도 없을 λ•Œ 처리 λ‘œμ§μ„ μΆ”κ°€ν•˜μ„Έμš”.
226
-
227
- print(f"\n=== New Request ===\nUser message: {user_message if user_message.strip() else '(Empty)'}")
 
 
 
 
 
228
 
229
- # κΈ°μ‘΄ λŒ€ν™”λ₯Ό Gemini ν˜•μ‹μœΌλ‘œ λ³€ν™˜
230
  chat_history = format_chat_history(conversation_state)
231
  chat = model.start_chat(history=chat_history)
232
-
233
  response = chat.send_message(user_message, stream=True)
 
234
  thought_buffer = ""
235
  response_buffer = ""
236
  thinking_complete = False
237
 
238
- # 'Thinking' ν‘œμ‹œμš©
239
  conversation_state.append(
240
  ChatMessage(
241
  role="assistant",
@@ -290,7 +290,7 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
290
  conversation_state.append(
291
  ChatMessage(
292
  role="assistant",
293
- content=f"I apologize, but I encountered an error: {str(e)}"
294
  )
295
  )
296
  yield conversation_state
@@ -298,7 +298,7 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
298
 
299
  def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
300
  """
301
- 화면에 ν‘œμ‹œν•˜κΈ° μœ„ν•΄ (user, assistant) νŠœν”Œ λͺ©λ‘μœΌλ‘œ λ³€ν™˜
302
  """
303
  result = []
304
  i = 0
@@ -321,8 +321,7 @@ def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, st
321
 
322
  def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
323
  """
324
- μ‚¬μš©μžκ°€ λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•  λ•Œ 호좜.
325
- ChatMessage 리슀트(conversation_state)에 user λ©”μ‹œμ§€λ₯Ό μΆ”κ°€ν•œ λ’€ λ°˜ν™˜.
326
  """
327
  conversation_state.append(ChatMessage(role="user", content=msg))
328
  # μž…λ ₯창은 λΉ„μ›Œμ€Œ
@@ -331,11 +330,9 @@ def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
331
 
332
  def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
333
  """
334
- μœ μ € λ©”μ‹œμ§€λ₯Ό λ°›μ•„ Geminiμ—κ²Œ μš”μ²­(슀트리밍)ν•˜κ³ , λŒ€ν™” 이λ ₯을 μ—…λ°μ΄νŠΈ ν›„
335
- ν™”λ©΄μ—λŠ” (user, assistant) νŠœν”Œμ„ λ°˜ν™˜ν•œλ‹€.
336
  """
337
  for updated_messages in stream_gemini_response(message, conversation_state):
338
- # ν™”λ©΄ ν‘œμ‹œμš© (user, assistant) νŠœν”Œλ‘œ λ³€ν™˜
339
  yield "", convert_to_display_tuples(updated_messages)
340
 
341
 
@@ -381,10 +378,11 @@ def create_ui():
381
  with gr.TabItem("AI μ½”λ“œμ±—"):
382
  gr.Markdown("## 예제λ₯Ό μž…λ ₯ λ˜λŠ” μ†ŒμŠ€ μ½”λ“œλ₯Ό λΆ™μ—¬λ„£κ³  μ§ˆλ¬Έν•˜μ„Έμš”")
383
 
384
- # Chatbot은 단지 좜λ ₯만 λ‹΄λ‹Ή(νŠœν”Œμ„ λ°›μ•„ ν‘œμ‹œ)
385
  chatbot = gr.Chatbot(
386
  label="λŒ€ν™”",
387
- height=400
 
388
  )
389
 
390
  msg = gr.Textbox(
@@ -392,7 +390,6 @@ def create_ui():
392
  placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”..."
393
  )
394
 
395
- # μˆ¨κ²¨μ§„ νŒŒλΌλ―Έν„°
396
  max_tokens = gr.Slider(
397
  minimum=1, maximum=8000,
398
  value=4000, label="Max Tokens",
@@ -419,12 +416,11 @@ def create_ui():
419
  ]
420
  gr.Examples(examples, inputs=msg)
421
 
422
- # λŒ€ν™” μƒνƒœ(μ±„νŒ… 기둝)λŠ” ChatMessage 객체둜만 μœ μ§€
423
  conversation_state = gr.State([])
424
 
425
- # 이벀트 체인
426
- # 1) μœ μ € λ©”μ‹œμ§€ -> user_submit_message -> (μž…λ ₯μ°½ 비움 + stateμΆ”κ°€)
427
- # 2) respond_wrapper -> Gemini 슀트리밍 -> λŒ€ν™” state κ°±μ‹  -> (user,assistant) νŠœν”Œ λ³€ν™˜
428
  msg.submit(
429
  user_submit_message,
430
  inputs=[msg, conversation_state],
@@ -433,7 +429,7 @@ def create_ui():
433
  ).then(
434
  respond_wrapper,
435
  inputs=[msg, conversation_state, max_tokens, temperature, top_p],
436
- outputs=[msg, chatbot],
437
  )
438
 
439
  with gr.TabItem("Recommended Best"):
@@ -441,7 +437,7 @@ def create_ui():
441
  "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
442
  )
443
 
444
- # 뢄석 λ²„νŠΌ 둜직
445
  space_id_state = gr.State()
446
  tree_structure_state = gr.State()
447
  app_py_content_lines = gr.State()
 
68
  return tree_data["error"]
69
  formatted = f"{indent}{'πŸ“' if tree_data.get('type') == 'directory' else 'πŸ“„'} {tree_data.get('name', 'Unknown')}\n"
70
  if tree_data.get("type") == "directory":
71
+ # 디렉토리λ₯Ό λ¨Όμ €, νŒŒμΌμ„ λ‚˜μ€‘μ— ν‘œμ‹œ
72
  for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
73
  formatted += format_tree_structure(child, indent + " ")
74
  return formatted
 
105
  progress(0.9, desc="μ‚¬μš©λ²• μ„€λͺ… 생성 쀑...")
106
  usage = explain_usage(app_content)
107
 
 
108
  lines_for_app_py = adjust_lines_for_code(app_content)
109
 
110
  progress(1.0, desc="μ™„λ£Œ")
 
123
  num_lines = len(code_content.split('\n'))
124
  return min(max(num_lines, min_lines), max_lines)
125
 
 
126
  # --------------------------------------------------
127
+ # Gemini 2.0 Flash Thinking λͺ¨λΈ (LLM) ν•¨μˆ˜λ“€
128
  # --------------------------------------------------
129
  from gradio import ChatMessage
130
 
131
  def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
132
  """
133
  ChatMessage λͺ©λ‘μ„ Gemini λͺ¨λΈμ΄ 이해할 수 μžˆλŠ” ν˜•μ‹μœΌλ‘œ λ³€ν™˜
134
+ (Thinking 메타데이터가 μžˆλŠ” λ©”μ‹œμ§€λŠ” λ¬΄μ‹œ)
135
  """
136
  formatted = []
137
  for m in messages:
138
+ if hasattr(m, "metadata") and m.metadata: # 'Thinking' λ©”μ‹œμ§€λŠ” μ œμ™Έ
 
139
  continue
140
  role = "assistant" if m.role == "assistant" else "user"
141
  formatted.append({"role": role, "parts": [m.content or ""]})
142
  return formatted
143
 
144
 
145
+ import google.generativeai as genai
146
+
147
  def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
148
  """
149
+ μ‹œμŠ€ν…œ & μœ μ € λ©”μ‹œμ§€λ‘œ Gemini λͺ¨λΈμ—κ²Œ 슀트리밍 μš”μ²­. μ΅œμ’… ν…μŠ€νŠΈ λ°˜ν™˜
 
150
  """
151
  init_msgs = [
152
  ChatMessage(role="system", content=system_message),
 
159
  for chunk in chat.send_message(user_message, stream=True):
160
  parts = chunk.candidates[0].content.parts
161
  if len(parts) == 2:
 
162
  final += parts[1].text
163
  else:
164
  final += parts[0].text
 
177
 
178
 
179
  def analyze_code(app_content: str):
 
180
  system_msg = (
181
  "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
182
  "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
 
197
 
198
 
199
  def explain_usage(app_content: str):
 
200
  system_msg = (
201
  "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
202
  "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
 
212
 
213
  def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
214
  """
215
+ Gemini에 슀트리밍 μš”μ²­.
216
+ 빈 λ©”μ‹œμ§€λ„ μ—¬κΈ°μ„œ 처리(μ—λŸ¬ 없이)ν•˜λ„λ‘ 함.
217
  """
218
+ # λ§Œμ•½ user_messageκ°€ μ™„μ „ 빈 λ¬Έμžμ—΄μ΄λΌλ©΄, λͺ¨λΈ 호좜 λŒ€μ‹  간단 μ•ˆλ‚΄
219
+ if not user_message.strip():
220
+ conversation_state.append(
221
+ ChatMessage(
222
+ role="assistant",
223
+ content="(Note: You sent an empty message. No LLM call was made.)"
224
+ )
225
+ )
226
+ yield conversation_state
227
+ return
228
 
229
+ print(f"\n=== New Request ===\nUser message: {user_message}")
230
  chat_history = format_chat_history(conversation_state)
231
  chat = model.start_chat(history=chat_history)
 
232
  response = chat.send_message(user_message, stream=True)
233
+
234
  thought_buffer = ""
235
  response_buffer = ""
236
  thinking_complete = False
237
 
238
+ # 'Thinking' ν‘œμ‹œμš© λ©”μ‹œμ§€ μΆ”κ°€
239
  conversation_state.append(
240
  ChatMessage(
241
  role="assistant",
 
290
  conversation_state.append(
291
  ChatMessage(
292
  role="assistant",
293
+ content=f"I apologize, but I encountered an error: {str(e)}"
294
  )
295
  )
296
  yield conversation_state
 
298
 
299
  def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
300
  """
301
+ ChatMessage 리슀트 -> (user, assistant) νŠœν”Œ 리슀트
302
  """
303
  result = []
304
  i = 0
 
321
 
322
  def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
323
  """
324
+ μ‚¬μš©μžκ°€ λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•  λ•Œ 호좜
 
325
  """
326
  conversation_state.append(ChatMessage(role="user", content=msg))
327
  # μž…λ ₯창은 λΉ„μ›Œμ€Œ
 
330
 
331
  def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
332
  """
333
+ Gemini에 슀트리밍 μš”μ²­ -> λŒ€ν™” 이λ ₯을 κ°±μ‹  -> (user, assistant) νŠœν”Œλ‘œ λ³€ν™˜ν•˜μ—¬ 화면에 ν‘œμ‹œ
 
334
  """
335
  for updated_messages in stream_gemini_response(message, conversation_state):
 
336
  yield "", convert_to_display_tuples(updated_messages)
337
 
338
 
 
378
  with gr.TabItem("AI μ½”λ“œμ±—"):
379
  gr.Markdown("## 예제λ₯Ό μž…λ ₯ λ˜λŠ” μ†ŒμŠ€ μ½”λ“œλ₯Ό λΆ™μ—¬λ„£κ³  μ§ˆλ¬Έν•˜μ„Έμš”")
380
 
381
+ # Chatbot에 type="messages"둜 μ„€μ • (ꢌμž₯)
382
  chatbot = gr.Chatbot(
383
  label="λŒ€ν™”",
384
+ height=400,
385
+ type="messages"
386
  )
387
 
388
  msg = gr.Textbox(
 
390
  placeholder="λ©”μ‹œμ§€λ₯Ό μž…λ ₯ν•˜μ„Έμš”..."
391
  )
392
 
 
393
  max_tokens = gr.Slider(
394
  minimum=1, maximum=8000,
395
  value=4000, label="Max Tokens",
 
416
  ]
417
  gr.Examples(examples, inputs=msg)
418
 
419
+ # λŒ€ν™” μƒνƒœ(μ±„νŒ… 기둝)λŠ” ChatMessage 객체둜만 관리
420
  conversation_state = gr.State([])
421
 
422
+ # 1) μœ μ € λ©”μ‹œμ§€ μž…λ ₯ -> user_submit_message
423
+ # 2) respond_wrapper -> Gemini 슀트리밍 -> λŒ€ν™” μ—…λ°μ΄νŠΈ -> (user,assistant) λ³€ν™˜ν•˜μ—¬ chatbot ν‘œμ‹œ
 
424
  msg.submit(
425
  user_submit_message,
426
  inputs=[msg, conversation_state],
 
429
  ).then(
430
  respond_wrapper,
431
  inputs=[msg, conversation_state, max_tokens, temperature, top_p],
432
+ outputs=[msg, chatbot],
433
  )
434
 
435
  with gr.TabItem("Recommended Best"):
 
437
  "Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
438
  )
439
 
440
+ # 뢄석 νƒ­ 둜직
441
  space_id_state = gr.State()
442
  tree_structure_state = gr.State()
443
  app_py_content_lines = gr.State()