ginipick committed
Commit 2f8b595 · verified · 1 Parent(s): eac18b7

Update app.py

Files changed (1): app.py (+21 -64)
app.py CHANGED
@@ -17,13 +17,11 @@ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
-
 def get_headers():
     if not HF_TOKEN:
         raise ValueError("Hugging Face token not found in environment variables")
     return {"Authorization": f"Bearer {HF_TOKEN}"}
 
-
 def get_file_content(space_id: str, file_path: str) -> str:
     file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
     try:
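Note: get_file_content reads files through the Space's raw endpoint, so no git checkout is needed. A minimal usage sketch (the Space id here is only an example, not taken from this commit):

    # Fetches https://huggingface.co/spaces/gradio/hello_world/raw/main/app.py
    content = get_file_content("gradio/hello_world", "app.py")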
@@ -35,7 +33,6 @@ def get_file_content(space_id: str, file_path: str) -> str:
     except requests.RequestException:
         return f"Error fetching content for file: {file_path}"
 
-
 def get_space_structure(space_id: str) -> Dict:
     try:
         files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
@@ -62,26 +59,20 @@ def get_space_structure(space_id: str) -> Dict:
         print(f"Error in get_space_structure: {str(e)}")
         return {"error": f"API request error: {str(e)}"}
 
-
 def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
     if "error" in tree_data:
         return tree_data["error"]
     formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
     if tree_data.get("type") == "directory":
-        # Show directories first, then files
         for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
             formatted += format_tree_structure(child, indent + "  ")
     return formatted
 
+def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
+    num_lines = len(code_content.split('\n'))
+    return min(max(num_lines, min_lines), max_lines)
 
 def analyze_space(url: str, progress=gr.Progress()):
-    """
-    Loads a HuggingFace Space's app.py, file structure, and so on,
-    then returns:
-    1) a code summary
-    2) a code analysis
-    3) usage instructions
-    """
     try:
         space_id = url.split('spaces/')[-1]
         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
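Note: the relocated adjust_lines_for_code simply clamps the file's line count into [min_lines, max_lines] for the code viewer. Since it is a pure function, its behavior is easy to pin down with a sketch:

    assert adjust_lines_for_code("a\nb\nc") == 10      # short file -> floor of 10 lines
    assert adjust_lines_for_code("\n" * 49) == 50      # mid-range file -> its own count
    assert adjust_lines_for_code("\n" * 500) == 100    # long file -> capped at 100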
@@ -106,8 +97,8 @@ def analyze_space(url: str, progress=gr.Progress()):
         usage = explain_usage(app_content)
 
         lines_for_app_py = adjust_lines_for_code(app_content)
-
         progress(1.0, desc="Done")
+
         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
 
     except Exception as e:
@@ -116,13 +107,6 @@ def analyze_space(url: str, progress=gr.Progress()):
         return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
 
-def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
-    """
-    Dynamically adjusts the number of lines to display based on the code's line count.
-    """
-    num_lines = len(code_content.split('\n'))
-    return min(max(num_lines, min_lines), max_lines)
-
 # --------------------------------------------------
 # Gemini 2.0 Flash Thinking model (LLM) functions
 # --------------------------------------------------
@@ -135,7 +119,7 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
     """
     formatted = []
     for m in messages:
-        if hasattr(m, "metadata") and m.metadata:  # exclude 'Thinking' messages
+        if hasattr(m, "metadata") and m.metadata:  # ignore 'Thinking' messages
             continue
         role = "assistant" if m.role == "assistant" else "user"
         formatted.append({"role": role, "parts": [m.content or ""]})
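Note: format_chat_history drops any ChatMessage that carries metadata (the streamed 'Thinking' placeholders use it) and maps the rest to Gemini-style history entries. An illustrative call, assuming gradio's ChatMessage dataclass as used throughout this file (the contents are made up):

    history = format_chat_history([
        ChatMessage(role="user", content="hi"),
        ChatMessage(role="assistant", content="...", metadata={"title": "Thinking"}),  # dropped
        ChatMessage(role="assistant", content="hello"),
    ])
    # -> [{"role": "user", "parts": ["hi"]}, {"role": "assistant", "parts": ["hello"]}]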
@@ -145,9 +129,6 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
 import google.generativeai as genai
 
 def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
-    """
-    Streaming request to the Gemini model with system & user messages; returns the final text.
-    """
     init_msgs = [
         ChatMessage(role="system", content=system_message),
         ChatMessage(role="user", content=user_message)
@@ -175,7 +156,6 @@ def summarize_code(app_content: str):
     except Exception as e:
         return f"Error while generating the summary: {str(e)}"
 
-
 def analyze_code(app_content: str):
     system_msg = (
         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
@@ -195,7 +175,6 @@ def analyze_code(app_content: str):
     except Exception as e:
         return f"Error while generating the analysis: {str(e)}"
 
-
 def explain_usage(app_content: str):
     system_msg = (
         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
@@ -213,9 +192,9 @@ def explain_usage(app_content: str):
 def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     """
     Streaming request to Gemini.
-    Empty messages are also handled here (without raising an error).
+    Handles an empty user_message without raising an exception.
     """
-    # If user_message is an entirely empty string, give a brief notice instead of calling the model
+    # Handle empty messages
     if not user_message.strip():
         conversation_state.append(
             ChatMessage(
@@ -235,7 +214,6 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     response_buffer = ""
     thinking_complete = False
 
-    # Add a message for the 'Thinking' display
     conversation_state.append(
         ChatMessage(
             role="assistant",
@@ -296,50 +274,31 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     yield conversation_state
 
 
-def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
+def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
     """
-    ChatMessage list -> list of (user, assistant) tuples
+    To hand the history to a Chatbot with type="messages",
+    converts a ChatMessage list into a [{"role": "...", "content": "..."}] list.
     """
-    result = []
-    i = 0
-    while i < len(messages):
-        if messages[i].role == "user":
-            user_text = messages[i].content
-            assistant_text = ""
-            if i + 1 < len(messages) and messages[i+1].role == "assistant":
-                assistant_text = messages[i+1].content
-                i += 2
-            else:
-                i += 1
-            result.append((user_text, assistant_text))
-        else:
-            # standalone assistant message
-            result.append(("", messages[i].content))
-            i += 1
-    return result
-
+    output = []
+    for msg in messages:
+        output.append({"role": msg.role, "content": msg.content})
+    return output
 
 def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
-    """
-    Called when the user submits a message
-    """
     conversation_state.append(ChatMessage(role="user", content=msg))
-    # Empty the input box
+    # Clear the input box
    return "", conversation_state
 
-
 def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
     """
-    Streaming request to Gemini -> update the conversation history -> convert to (user, assistant) tuples for display
+    Gemini streaming -> update conversation_state -> convert to the messages format and return
     """
     for updated_messages in stream_gemini_response(message, conversation_state):
-        yield "", convert_to_display_tuples(updated_messages)
+        # Convert ChatMessage -> [{"role": "user"/"assistant", "content": "..."}]
+        yield "", convert_for_messages_format(updated_messages)
 
 
 def create_ui():
-    """
-    Function that builds the Gradio UI
-    """
     try:
         css = """
         footer {visibility: hidden;}
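Note: the tuple-pairing loop is replaced by a one-to-one mapping. For the same history, the two converters differ like this (a sketch with made-up contents):

    msgs = [
        ChatMessage(role="user", content="What does app.py do?"),
        ChatMessage(role="assistant", content="It builds a Gradio UI."),
        ChatMessage(role="user", content="Which model?"),
    ]
    # old: convert_to_display_tuples(msgs)
    # -> [("What does app.py do?", "It builds a Gradio UI."), ("Which model?", "")]
    # new: convert_for_messages_format(msgs)
    # -> [{"role": "user", "content": "What does app.py do?"},
    #     {"role": "assistant", "content": "It builds a Gradio UI."},
    #     {"role": "user", "content": "Which model?"}]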
@@ -378,11 +337,11 @@ def create_ui():
         with gr.TabItem("AI Code Chat"):
             gr.Markdown("## Enter an example or paste source code, then ask a question")
 
-            # Set the Chatbot to type="messages" (recommended)
+            # Chatbot: type="messages"
             chatbot = gr.Chatbot(
                 label="Chat",
                 height=400,
-                type="messages"
+                type="messages"  # from here on, [{"role": ..., "content": ...}] format
             )
 
             msg = gr.Textbox(
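Note: with type="messages", gr.Chatbot renders openai-style role/content dicts directly, which is exactly what convert_for_messages_format yields. A self-contained sketch, assuming a recent Gradio 4.x/5.x release:

    import gradio as gr

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(
            label="Chat",
            height=400,
            type="messages",  # expects [{"role": ..., "content": ...}]
            value=[{"role": "user", "content": "hi"},
                   {"role": "assistant", "content": "hello"}],
        )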
@@ -416,11 +375,9 @@ def create_ui():
             ]
             gr.Examples(examples, inputs=msg)
 
-            # Conversation state (chat history) is managed only as ChatMessage objects
+            # Conversation state
             conversation_state = gr.State([])
 
-            # 1) user message submitted -> user_submit_message
-            # 2) respond_wrapper -> Gemini streaming -> update conversation -> convert to (user, assistant) and show in the chatbot
             msg.submit(
                 user_submit_message,
                 inputs=[msg, conversation_state],
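Note: the hunk ends before the outputs and the chained streaming call, and the remainder is not shown in this commit view. The usual Gradio two-step wiring, sketched purely as an assumption about the missing lines (component names max_tokens, temperature, and top_p are hypothetical):

    # Hypothetical continuation -- not quoted from the diff.
    msg.submit(
        user_submit_message,
        inputs=[msg, conversation_state],
        outputs=[msg, conversation_state],
    ).then(
        respond_wrapper,
        inputs=[msg, conversation_state, max_tokens, temperature, top_p],
        outputs=[msg, chatbot],
    )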
 