Update app.py
app.py CHANGED
@@ -17,13 +17,11 @@ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
 genai.configure(api_key=GEMINI_API_KEY)
 model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
 
-
 def get_headers():
     if not HF_TOKEN:
         raise ValueError("Hugging Face token not found in environment variables")
     return {"Authorization": f"Bearer {HF_TOKEN}"}
 
-
 def get_file_content(space_id: str, file_path: str) -> str:
     file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
     try:
@@ -35,7 +33,6 @@ def get_file_content(space_id: str, file_path: str) -> str:
     except requests.RequestException:
         return f"Error fetching content for file: {file_path}"
 
-
 def get_space_structure(space_id: str) -> Dict:
     try:
         files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
@@ -62,26 +59,20 @@ def get_space_structure(space_id: str) -> Dict:
         print(f"Error in get_space_structure: {str(e)}")
         return {"error": f"API request error: {str(e)}"}
 
-
 def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
     if "error" in tree_data:
         return tree_data["error"]
     formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
     if tree_data.get("type") == "directory":
-        # show directories first, then files
         for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
             formatted += format_tree_structure(child, indent + "  ")
     return formatted
 
+def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
+    num_lines = len(code_content.split('\n'))
+    return min(max(num_lines, min_lines), max_lines)
 
 def analyze_space(url: str, progress=gr.Progress()):
-    """
-    Fetches the app.py and file structure of a HuggingFace Space and returns:
-    1) a code summary
-    2) a code analysis
-    3) usage instructions
-    """
     try:
         space_id = url.split('spaces/')[-1]
         if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
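The relocated `adjust_lines_for_code` helper simply clamps a file's line count into `[min_lines, max_lines]`. A quick standalone check of the logic exactly as it appears in this diff:

```python
def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
    # Clamp the file's line count into [min_lines, max_lines].
    num_lines = len(code_content.split('\n'))
    return min(max(num_lines, min_lines), max_lines)

assert adjust_lines_for_code("a\nb\nc") == 10                # short files floor at 10
assert adjust_lines_for_code("\n".join(["x"] * 42)) == 42    # mid-size passes through
assert adjust_lines_for_code("\n".join(["x"] * 500)) == 100  # long files cap at 100
```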
@@ -106,8 +97,8 @@ def analyze_space(url: str, progress=gr.Progress()):
         usage = explain_usage(app_content)
 
         lines_for_app_py = adjust_lines_for_code(app_content)
-
         progress(1.0, desc="Done")
+
         return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
 
     except Exception as e:
@@ -116,13 +107,6 @@ def analyze_space(url: str, progress=gr.Progress()):
     return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
 
 
-def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
-    """
-    Dynamically adjusts the required number of lines to the code's line count.
-    """
-    num_lines = len(code_content.split('\n'))
-    return min(max(num_lines, min_lines), max_lines)
-
 # --------------------------------------------------
 # Gemini 2.0 Flash Thinking model (LLM) functions
 # --------------------------------------------------
@@ -135,7 +119,7 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
     """
     formatted = []
     for m in messages:
-        if hasattr(m, "metadata") and m.metadata:  # 'Thinking' messages
+        if hasattr(m, "metadata") and m.metadata:  # ignore 'Thinking' messages
             continue
         role = "assistant" if m.role == "assistant" else "user"
         formatted.append({"role": role, "parts": [m.content or ""]})
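For context, `format_chat_history` emits Gemini-style turns (a `parts` list per message) rather than OpenAI-style `content` keys. A runnable sketch of the behavior, using a stand-in `ChatMessage` dataclass in place of gradio's:

```python
from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class ChatMessage:  # stand-in for gradio's ChatMessage, for this sketch only
    role: str
    content: str
    metadata: Optional[Dict] = None

def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
    formatted = []
    for m in messages:
        if hasattr(m, "metadata") and m.metadata:  # ignore 'Thinking' messages
            continue
        role = "assistant" if m.role == "assistant" else "user"
        formatted.append({"role": role, "parts": [m.content or ""]})
    return formatted

msgs = [
    ChatMessage("user", "Summarize app.py"),
    ChatMessage("assistant", "Thinking...", metadata={"title": "Thinking"}),
    ChatMessage("assistant", "It analyzes a Hugging Face Space."),
]
print(format_chat_history(msgs))
# [{'role': 'user', 'parts': ['Summarize app.py']},
#  {'role': 'assistant', 'parts': ['It analyzes a Hugging Face Space.']}]
```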
@@ -145,9 +129,6 @@ def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
 import google.generativeai as genai
 
 def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
-    """
-    Streaming request to the Gemini model with system & user messages; returns the final text.
-    """
     init_msgs = [
         ChatMessage(role="system", content=system_message),
         ChatMessage(role="user", content=user_message)
@@ -175,7 +156,6 @@ def summarize_code(app_content: str):
     except Exception as e:
         return f"Error while generating the summary: {str(e)}"
 
-
 def analyze_code(app_content: str):
     system_msg = (
         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
@@ -195,7 +175,6 @@ def analyze_code(app_content: str):
     except Exception as e:
         return f"Error while generating the analysis: {str(e)}"
 
-
 def explain_usage(app_content: str):
     system_msg = (
         "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
@@ -213,9 +192,9 @@ def explain_usage(app_content: str):
 def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
     """
     Streaming request to Gemini.
-    Empty ...
+    Handles an empty user_message without raising an exception.
     """
-    #
+    # handle an empty message
     if not user_message.strip():
         conversation_state.append(
             ChatMessage(
@@ -235,7 +214,6 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
     response_buffer = ""
     thinking_complete = False
 
-    # add a message to display 'Thinking'
     conversation_state.append(
         ChatMessage(
             role="assistant",
@@ -296,50 +274,31 @@ def stream_gemini_response(user_message: str, conversation_state: List[ChatMessa
         yield conversation_state
 
 
-def ...
+def convert_for_messages_format(messages: List[ChatMessage]) -> List[Dict[str, str]]:
     """
-    ...
+    Convert a list of ChatMessage into [{"role": "...", "content": "..."}]
+    so it can be passed to a Chatbot with type="messages".
     """
-    result = []
-    i = 0
-    while i < len(messages):
-        if messages[i].role == "user":
-            user_text = messages[i].content
-            assistant_text = ""
-            if i + 1 < len(messages) and messages[i+1].role == "assistant":
-                assistant_text = messages[i+1].content
-                i += 2
-            else:
-                i += 1
-            result.append((user_text, assistant_text))
-        else:
-            # standalone assistant message
-            result.append(("", messages[i].content))
-            i += 1
-    return result
-
+    output = []
+    for msg in messages:
+        output.append({"role": msg.role, "content": msg.content})
+    return output
 
 def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
-    """
-    Called when the user submits a message
-    """
     conversation_state.append(ChatMessage(role="user", content=msg))
-    #
+    # clear the input box
     return "", conversation_state
 
-
 def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
     """
-    Gemini ...
+    Gemini streaming -> update conversation_state -> convert to messages format and yield
     """
     for updated_messages in stream_gemini_response(message, conversation_state):
-        ...
+        # convert ChatMessage -> [{"role": "user"/"assistant", "content": "..."}]
+        yield "", convert_for_messages_format(updated_messages)
 
 
 def create_ui():
-    """
-    Builds the Gradio UI
-    """
     try:
         css = """
         footer {visibility: hidden;}
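The new `convert_for_messages_format` helper is easy to sanity-check in isolation; a minimal sketch, assuming gradio's `ChatMessage` dataclass with the `role`/`content` fields used throughout this file:

```python
from gradio import ChatMessage

def convert_for_messages_format(messages):
    # ChatMessage objects -> dicts accepted by gr.Chatbot(type="messages")
    return [{"role": m.role, "content": m.content} for m in messages]

history = [
    ChatMessage(role="user", content="What does this Space do?"),
    ChatMessage(role="assistant", content="It analyzes other Spaces with Gemini."),
]
print(convert_for_messages_format(history))
# [{'role': 'user', 'content': 'What does this Space do?'},
#  {'role': 'assistant', 'content': 'It analyzes other Spaces with Gemini.'}]
```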
@@ -378,11 +337,11 @@ def create_ui():
             with gr.TabItem("AI Code Chat"):
                 gr.Markdown("## Enter an example or paste source code, then ask a question")
 
-                # Chatbot
+                # Chatbot: type="messages"
                 chatbot = gr.Chatbot(
                     label="Conversation",
                     height=400,
-                    type="messages"
+                    type="messages"  # expects [{"role": ..., "content": ...}] entries
                 )
 
                 msg = gr.Textbox(
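With `type="messages"`, `gr.Chatbot` consumes one `{"role": ..., "content": ...}` dict per turn instead of `(user, assistant)` tuples, which is why the old tuple-conversion helper above could be dropped. A minimal sketch:

```python
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Conversation", height=400, type="messages")
    # Seed the history with openai-style dicts, one per turn.
    demo.load(
        lambda: [{"role": "assistant", "content": "Paste some code and ask a question."}],
        outputs=chatbot,
    )

if __name__ == "__main__":
    demo.launch()
```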
@@ -416,11 +375,9 @@ def create_ui():
                 ]
                 gr.Examples(examples, inputs=msg)
 
-                # conversation state
+                # conversation state
                 conversation_state = gr.State([])
 
-                # 1) user message input -> user_submit_message
-                # 2) respond_wrapper -> Gemini streaming -> update the conversation -> convert to (user, assistant) pairs for the chatbot
                 msg.submit(
                     user_submit_message,
                     inputs=[msg, conversation_state],
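The diff is truncated inside the `msg.submit(...)` call. For orientation only, a hypothetical sketch of how such a two-step wiring is typically chained in gradio; the slider names and argument lists here are assumptions, not the Space's actual code:

```python
# Hypothetical wiring (illustrative; not taken from the diff):
msg.submit(
    user_submit_message,              # 1) append the user turn and clear the textbox
    inputs=[msg, conversation_state],
    outputs=[msg, conversation_state],
).then(
    respond_wrapper,                  # 2) stream Gemini and refresh the chatbot
    inputs=[msg, conversation_state, max_tokens, temperature, top_p],
    outputs=[msg, chatbot],
)
```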