ginipick committed on
Commit a8eb718 · verified · 1 Parent(s): 2606695

Update app.py

Files changed (1)
  1. app.py +195 -109
app.py CHANGED
@@ -1,26 +1,30 @@
- import gradio as gr
- from huggingface_hub import InferenceClient, HfApi
  import os
  import requests
- from typing import List, Dict, Union, Tuple
  import traceback
- from PIL import Image
- from io import BytesIO
- import asyncio
- from gradio_client import Client
  import time
  import threading
  import json
- import re

  # HuggingFace API key (for Space analysis)
  HF_TOKEN = os.getenv("HF_TOKEN")
  hf_api = HfApi(token=HF_TOKEN)

- # API key and client for the Gemini 2.0 Thinking model (for the LLM)
- G_API_KEY = os.getenv("G_API_KEY")
- gemini_client = InferenceClient("gemini-2.0-flash-thinking-exp-01-21", token=G_API_KEY)

  def get_headers():
      if not HF_TOKEN:
          raise ValueError("Hugging Face token not found in environment variables")
@@ -75,57 +79,6 @@ def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
          formatted += format_tree_structure(child, indent + " ")
      return formatted

- def summarize_code(app_content: str):
-     system_message = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely, in three lines or fewer."
-     user_message = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
-
-     messages = [
-         {"role": "system", "content": system_message},
-         {"role": "user", "content": user_message}
-     ]
-
-     try:
-         response = gemini_client.chat_completion(messages, max_tokens=200, temperature=0.7)
-         return response.choices[0].message.content
-     except Exception as e:
-         return f"Error while generating the summary: {str(e)}"
-
- def analyze_code(app_content: str):
-     system_message = """You are an AI assistant that analyzes Python code. Analyze the given code and explain the following items:
-     A. Background and necessity
-     B. Functional utility and value
-     C. Distinctive strengths
-     D. Target audience and applications
-     E. Expected impact
-     Compare it against existing and similar projects. Output in Markdown format."""
-     user_message = f"Analyze the following Python code:\n\n{app_content}"
-
-     messages = [
-         {"role": "system", "content": system_message},
-         {"role": "user", "content": user_message}
-     ]
-
-     try:
-         response = gemini_client.chat_completion(messages, max_tokens=1000, temperature=0.7)
-         return response.choices[0].message.content
-     except Exception as e:
-         return f"Error while generating the analysis: {str(e)}"
-
- def explain_usage(app_content: str):
-     system_message = "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if walking through the running app on screen. Output in Markdown format."
-     user_message = f"Explain how to use the following Python code:\n\n{app_content}"
-
-     messages = [
-         {"role": "system", "content": system_message},
-         {"role": "user", "content": user_message}
-     ]
-
-     try:
-         response = gemini_client.chat_completion(messages, max_tokens=800, temperature=0.7)
-         return response.choices[0].message.content
-     except Exception as e:
-         return f"Error while generating the usage explanation: {str(e)}"
-
  def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
      """
      Dynamically adjusts the number of lines based on the code content.
@@ -138,16 +91,13 @@ def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int
      Returns:
      - int: the resulting number of lines
      """
-     # Count the lines of code
      num_lines = len(code_content.split('\n'))
-     # Use min_lines if there are fewer lines, max_lines if there are more
      return min(max(num_lines, min_lines), max_lines)
 
  def analyze_space(url: str, progress=gr.Progress()):
      try:
          space_id = url.split('spaces/')[-1]

-         # Fixed the Space ID validation
          if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
              raise ValueError(f"Invalid Space ID format: {space_id}")

@@ -169,7 +119,6 @@ def analyze_space(url: str, progress=gr.Progress()):
          progress(0.9, desc="Generating the usage explanation...")
          usage = explain_usage(app_content)

-         # Count the lines to set the editor height
          app_py_lines = adjust_lines_for_code(app_content)

          progress(1.0, desc="Done")
@@ -179,38 +128,178 @@ def analyze_space(url: str, progress=gr.Progress()):
          print(traceback.format_exc())
          return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10

- def respond(
-     message: str,
-     history: List[Tuple[str, str]],
-     system_message: str = "",
-     max_tokens: int = 1024,
-     temperature: float = 0.7,
-     top_p: float = 0.9,
- ):
-     system_prefix = """You are an AI coding expert specialized in Hugging Face. Answer the user's questions kindly and in detail.
-     You must understand Gradio's characteristics precisely, and write code and resolve errors without omissions in Requirements.txt.
-     Always strive to provide accurate and useful information."""
-
-     messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]
-     for user_msg, assistant_msg in history:
-         messages.append({"role": "user", "content": user_msg})
-         if assistant_msg:
-             messages.append({"role": "assistant", "content": assistant_msg})
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-     for message in gemini_client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.get('content', None)
-         if token:
-             response += token.strip("")
-             yield response

  def create_ui():
      try:
          css = """
@@ -292,12 +381,12 @@ def create_ui():
          """

          with gr.Blocks(theme="default", css=css) as demo:
-             gr.Markdown("# MOUSE: HF Space Deep-Research", elem_classes="header-markdown")

              with gr.Tabs() as tabs:
                  with gr.TabItem("Analysis"):
                      with gr.Row():
-                         with gr.Column(scale=6):  # left pane
                              url_input = gr.Textbox(label="HuggingFace Space URL", placeholder="e.g. https://huggingface.co/spaces/username/space_name")
                              analyze_button = gr.Button("Analyze", variant="primary")
303
 
@@ -313,7 +402,7 @@ def create_ui():
                      with gr.Group(elem_classes="output-group tree-view-scroll"):
                          tree_view_output = gr.Textbox(label="File structure (Tree View)", lines=30)

-                 with gr.Column(scale=4):  # right pane
                      with gr.Group(elem_classes="output-group full-height"):
                          code_tabs = gr.Tabs()
                          with code_tabs:
@@ -332,7 +421,10 @@ def create_ui():
                      )

              with gr.TabItem("AI Coding"):
-                 chatbot = gr.Chatbot(label="Conversation", elem_classes="output-group full-height")

                  msg = gr.Textbox(label="Message", placeholder="Type a message...")
@@ -352,14 +444,9 @@ def create_ui():
                  gr.Examples(examples, inputs=msg)

                  def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
-                     bot_message = ""
-                     for response in respond(message, chat_history, max_tokens=max_tokens, temperature=temperature, top_p=top_p):
-                         bot_message = response  # keep the latest response
-                         yield "", chat_history + [(message, bot_message)]

-                     chat_history.append((message, bot_message))
-                     return "", chat_history
-
                  msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])

              with gr.TabItem("Recommended Best"):
@@ -383,7 +470,6 @@ def create_ui():
                  outputs=[requirements_content]
              )

-         # Dynamically adjust the line count for app.py
          app_py_content.change(lambda lines: gr.update(lines=lines), inputs=[app_py_content_lines], outputs=[app_py_content])

          return demo
 
+
  import os
+ import gradio as gr
+ from gradio import ChatMessage
+ from huggingface_hub import HfApi
  import requests
+ import re
  import traceback
  import time
  import threading
  import json
+ import asyncio
+ from typing import List, Dict, Tuple, Iterator
+ import google.generativeai as genai

  # HuggingFace API key (for Space analysis)
  HF_TOKEN = os.getenv("HF_TOKEN")
  hf_api = HfApi(token=HF_TOKEN)

+ # API key and client for the Gemini 2.0 Flash Thinking model (for the LLM)
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+ genai.configure(api_key=GEMINI_API_KEY)
+ model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
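
A quick way to sanity-check this client setup (a minimal sketch, not part of the commit; it assumes the google-generativeai package is installed and GEMINI_API_KEY is set):

    import os
    import google.generativeai as genai

    genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
    model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
    # One-shot, non-streaming call; response.text carries the generated answer.
    response = model.generate_content("Reply with the single word: pong")
    print(response.text)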

+ # --------------------------------------------------
+ # File and Space analysis functions (existing code kept)
+ # --------------------------------------------------
  def get_headers():
      if not HF_TOKEN:
          raise ValueError("Hugging Face token not found in environment variables")
 
      formatted += format_tree_structure(child, indent + " ")
      return formatted

  def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
      """
      Dynamically adjusts the number of lines based on the code content.
      Returns:
      - int: the resulting number of lines
      """
      num_lines = len(code_content.split('\n'))
      return min(max(num_lines, min_lines), max_lines)
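
The clamp keeps the editor height between the two bounds; for instance (an illustrative check, not part of the commit):

    assert adjust_lines_for_code("x = 1") == 10        # 1 line, clamped up to min_lines
    assert adjust_lines_for_code("\n" * 49) == 50      # 50 lines, within bounds
    assert adjust_lines_for_code("\n" * 500) == 100    # 501 lines, clamped down to max_lines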

  def analyze_space(url: str, progress=gr.Progress()):
      try:
          space_id = url.split('spaces/')[-1]

          if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
              raise ValueError(f"Invalid Space ID format: {space_id}")

          progress(0.9, desc="Generating the usage explanation...")
          usage = explain_usage(app_content)

          app_py_lines = adjust_lines_for_code(app_content)

          progress(1.0, desc="Done")

          print(traceback.format_exc())
          return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10

+ # --------------------------------------------------
+ # Helper functions for the Gemini 2.0 Flash Thinking model
+ # --------------------------------------------------
+ def format_chat_history(messages: list) -> list:
+     """
+     Converts a list of Gradio ChatMessage objects into the format Gemini understands.
+     """
+     formatted_history = []
+     for message in messages:
+         # Skip thinking messages (messages that carry metadata).
+         if not (hasattr(message, "metadata") and message.metadata):
+             formatted_history.append({
+                 "role": "user" if message.role == "user" else "assistant",
+                 "parts": [message.content or ""]
+             })
+     return formatted_history
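
For illustration, the conversion drops the metadata-tagged thinking entry and keeps plain turns (a sketch, not part of the commit):

    history = [
        ChatMessage(role="user", content="Hello"),
        ChatMessage(role="assistant", content="...", metadata={"title": "Thinking"}),  # skipped
        ChatMessage(role="assistant", content="Hi there!"),
    ]
    print(format_chat_history(history))
    # [{'role': 'user', 'parts': ['Hello']}, {'role': 'assistant', 'parts': ['Hi there!']}]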
+
+ def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
+     """
+     Sends the system and user messages and accumulates the streaming response into the final text.
+     Note: max_tokens and temperature are accepted for signature compatibility but are not
+     forwarded to Gemini in this implementation.
+     """
+     # Build the initial chat history (plain string based)
+     initial_messages = [
+         ChatMessage(role="system", content=system_message),
+         ChatMessage(role="user", content=user_message)
+     ]
+     chat_history = format_chat_history(initial_messages)
+     chat = model.start_chat(history=chat_history)
+     final_response = ""
+     try:
+         for chunk in chat.send_message(user_message, stream=True):
+             parts = chunk.candidates[0].content.parts
+             # If the chunk separates the thinking part from the final answer, use the final answer
+             if len(parts) == 2:
+                 final_response += parts[1].text
+             else:
+                 final_response += parts[0].text
+         return final_response.strip()
+     except Exception as e:
+         return f"Error during the LLM call: {str(e)}"

+ def summarize_code(app_content: str):
+     system_message = "You are an AI assistant that analyzes and summarizes Python code. Summarize the given code concisely, in three lines or fewer."
+     user_message = f"Summarize the following Python code in three lines or fewer:\n\n{app_content}"
+     try:
+         return gemini_chat_completion(system_message, user_message, max_tokens=200, temperature=0.7)
+     except Exception as e:
+         return f"Error while generating the summary: {str(e)}"
+
+ def analyze_code(app_content: str):
+     system_message = (
+         "You are an AI assistant that analyzes Python code. Analyze the given code and explain the following items:\n"
+         "A. Background and necessity\n"
+         "B. Functional utility and value\n"
+         "C. Distinctive strengths\n"
+         "D. Target audience and applications\n"
+         "E. Expected impact\n"
+         "Compare it against existing and similar projects. Output in Markdown format."
+     )
+     user_message = f"Analyze the following Python code:\n\n{app_content}"
+     try:
+         return gemini_chat_completion(system_message, user_message, max_tokens=1000, temperature=0.7)
+     except Exception as e:
+         return f"Error while generating the analysis: {str(e)}"
+
+ def explain_usage(app_content: str):
+     system_message = "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if walking through the running app on screen. Output in Markdown format."
+     user_message = f"Explain how to use the following Python code:\n\n{app_content}"
+     try:
+         return gemini_chat_completion(system_message, user_message, max_tokens=800, temperature=0.7)
+     except Exception as e:
+         return f"Error while generating the usage explanation: {str(e)}"
+
+ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
+     """
+     Streams a response from the Gemini 2.0 Flash Thinking model.
+     Uses Gradio ChatMessage objects and surfaces the live 'thinking' stream as well as the final answer.
+     """
+     if not user_message.strip():
+         messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed."))
+         yield messages
+         return
+
+     try:
+         print(f"\n=== New Request (Text) ===")
+         print(f"User message: {user_message}")
+
+         # Build the formatted chat history
+         chat_history = format_chat_history(messages)
+         chat = model.start_chat(history=chat_history)
+         response = chat.send_message(user_message, stream=True)
+
+         thought_buffer = ""
+         response_buffer = ""
+         thinking_complete = False
+
+         # Add the initial 'thinking' message
+         messages.append(
+             ChatMessage(
+                 role="assistant",
+                 content="",
+                 metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+             )
+         )
+
+         for chunk in response:
+             parts = chunk.candidates[0].content.parts
+             current_chunk = parts[0].text
+
+             if len(parts) == 2 and not thinking_complete:
+                 # The thought is complete and the answer begins in the same chunk
+                 thought_buffer += current_chunk
+                 print(f"\n=== Complete Thought ===\n{thought_buffer}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=thought_buffer,
+                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                 )
+                 yield messages
+
+                 response_buffer = parts[1].text
+                 print(f"\n=== Starting Response ===\n{response_buffer}")
+
+                 messages.append(
+                     ChatMessage(
+                         role="assistant",
+                         content=response_buffer
+                     )
+                 )
+                 thinking_complete = True
+
+             elif thinking_complete:
+                 # Keep appending to the final answer
+                 response_buffer += current_chunk
+                 print(f"\n=== Response Chunk ===\n{current_chunk}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=response_buffer
+                 )
+
+             else:
+                 # Still thinking: keep appending to the thought buffer
+                 thought_buffer += current_chunk
+                 print(f"\n=== Thinking Chunk ===\n{current_chunk}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=thought_buffer,
+                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                 )
+
+             yield messages
+
+         print(f"\n=== Final Response ===\n{response_buffer}")
+
+     except Exception as e:
+         print(f"\n=== Error ===\n{str(e)}")
+         messages.append(
+             ChatMessage(
+                 role="assistant",
+                 content=f"I apologize, but I encountered an error: {str(e)}"
+             )
+         )
+         yield messages
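
Outside Gradio, the generator can be driven manually (a hypothetical snippet; the prompt and names are illustrative):

    history: list = []
    for snapshot in stream_gemini_response("Explain Python decorators briefly", history):
        latest = snapshot[-1]                      # the message currently being streamed
        print(latest.role, repr(latest.content)[:80])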
+
+ def respond(message: str, history: list) -> Iterator[list]:
+     """
+     Delegates to stream_gemini_response() so replies stream back in chat form.
+     """
+     return stream_gemini_response(message, history)
+
+ # --------------------------------------------------
+ # Gradio UI construction
+ # --------------------------------------------------
  def create_ui():
      try:
          css = """
          """

          with gr.Blocks(theme="default", css=css) as demo:
+             gr.Markdown("# MOUSE Space Analysis", elem_classes="header-markdown")

              with gr.Tabs() as tabs:
                  with gr.TabItem("Analysis"):
                      with gr.Row():
+                         with gr.Column(scale=6):
                              url_input = gr.Textbox(label="HuggingFace Space URL", placeholder="e.g. https://huggingface.co/spaces/username/space_name")
                              analyze_button = gr.Button("Analyze", variant="primary")

                      with gr.Group(elem_classes="output-group tree-view-scroll"):
                          tree_view_output = gr.Textbox(label="File structure (Tree View)", lines=30)

+                     with gr.Column(scale=4):
                          with gr.Group(elem_classes="output-group full-height"):
                              code_tabs = gr.Tabs()
                              with code_tabs:
                      )

              with gr.TabItem("AI Coding"):
+                 chatbot = gr.Chatbot(
+                     label="Conversation",
+                     elem_classes="output-group full-height"
+                 )

                  msg = gr.Textbox(label="Message", placeholder="Type a message...")

                  gr.Examples(examples, inputs=msg)

                  def respond_wrapper(message, chat_history, max_tokens, temperature, top_p):
+                     # stream_gemini_response returns a generator, so yield from it;
+                     # returning the generator object directly would not stream in Gradio.
+                     for updated_history in stream_gemini_response(message, chat_history):
+                         yield "", updated_history

                  msg.submit(respond_wrapper, [msg, chatbot, max_tokens, temperature, top_p], [msg, chatbot])

              with gr.TabItem("Recommended Best"):

                  outputs=[requirements_content]
              )

          app_py_content.change(lambda lines: gr.update(lines=lines), inputs=[app_py_content_lines], outputs=[app_py_content])

          return demo