import os
import gradio as gr
from gradio import ChatMessage
from typing import Iterator, List, Dict, Tuple, Any
import google.generativeai as genai
from huggingface_hub import HfApi
import requests
import re
import traceback
# Hugging Face API token (used for Space analysis)
HF_TOKEN = os.getenv("HF_TOKEN")
hf_api = HfApi(token=HF_TOKEN)
# Gemini 2.0 Flash Thinking model API key and client (used as the LLM)
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
genai.configure(api_key=GEMINI_API_KEY)
model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
def get_headers():
if not HF_TOKEN:
raise ValueError("Hugging Face token not found in environment variables")
return {"Authorization": f"Bearer {HF_TOKEN}"}
def get_file_content(space_id: str, file_path: str) -> str:
file_url = f"https://huggingface.co/spaces/{space_id}/raw/main/{file_path}"
try:
response = requests.get(file_url, headers=get_headers())
if response.status_code == 200:
return response.text
else:
return f"File not found or inaccessible: {file_path}"
except requests.RequestException:
return f"Error fetching content for file: {file_path}"
def get_space_structure(space_id: str) -> Dict:
try:
files = hf_api.list_repo_files(repo_id=space_id, repo_type="space")
tree = {"type": "directory", "path": "", "name": space_id, "children": []}
for file in files:
path_parts = file.split('/')
current = tree
for i, part in enumerate(path_parts):
                if i == len(path_parts) - 1:  # leaf: this part is the file name
current["children"].append({"type": "file", "path": file, "name": part})
else:
found = False
for child in current["children"]:
if child["type"] == "directory" and child["name"] == part:
current = child
found = True
break
if not found:
new_dir = {"type": "directory", "path": '/'.join(path_parts[:i+1]), "name": part, "children": []}
current["children"].append(new_dir)
current = new_dir
return tree
except Exception as e:
print(f"Error in get_space_structure: {str(e)}")
return {"error": f"API request error: {str(e)}"}
def format_tree_structure(tree_data: Dict, indent: str = "") -> str:
if "error" in tree_data:
return tree_data["error"]
    formatted = f"{indent}{'📁' if tree_data.get('type') == 'directory' else '📄'} {tree_data.get('name', 'Unknown')}\n"
if tree_data.get("type") == "directory":
        # Sort so that directories are listed before files, each group alphabetically
for child in sorted(tree_data.get("children", []), key=lambda x: (x.get("type", "") != "directory", x.get("name", ""))):
formatted += format_tree_structure(child, indent + " ")
return formatted
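# Rendered output for the hypothetical tree above (directories sort first,
# then files):
#   📁 user/demo
#     📁 assets
#       📄 logo.png
#     📄 app.py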
def analyze_space(url: str, progress=gr.Progress()):
"""
HuggingFace Space์˜ app.py์™€ ํŒŒ์ผ๊ตฌ์กฐ ๋“ฑ์„ ๋ถˆ๋Ÿฌ์™€์„œ:
1) ์ฝ”๋“œ ์š”์•ฝ
2) ์ฝ”๋“œ ๋ถ„์„
3) ์‚ฌ์šฉ๋ฒ•
๋“ฑ์„ ๋ฐ˜ํ™˜ํ•ฉ๋‹ˆ๋‹ค.
"""
try:
space_id = url.split('spaces/')[-1]
if not re.match(r'^[\w.-]+/[\w.-]+$', space_id):
raise ValueError(f"Invalid Space ID format: {space_id}")
        progress(0.1, desc="Analyzing file structure...")
        tree_structure = get_space_structure(space_id)
        if "error" in tree_structure:
            raise ValueError(tree_structure["error"])
        tree_view = format_tree_structure(tree_structure)
        progress(0.3, desc="Fetching app.py...")
        app_content = get_file_content(space_id, "app.py")
        progress(0.5, desc="Summarizing code...")
        summary = summarize_code(app_content)
        progress(0.7, desc="Analyzing code...")
        analysis = analyze_code(app_content)
        progress(0.9, desc="Generating usage instructions...")
        usage = explain_usage(app_content)
        # Adjust the number of display lines to the code length
        lines_for_app_py = adjust_lines_for_code(app_content)
        progress(1.0, desc="Done")
        return app_content, tree_view, tree_structure, space_id, summary, analysis, usage, lines_for_app_py
    except Exception as e:
        print(f"Error in analyze_space: {str(e)}")
        print(traceback.format_exc())
        return f"An error occurred: {str(e)}", "", None, "", "", "", "", 10
def adjust_lines_for_code(code_content: str, min_lines: int = 10, max_lines: int = 100) -> int:
"""
์ฝ”๋“œ์˜ ์ค„ ์ˆ˜์— ๋งž์ถฐ ํ‘œ์‹œํ•  lines ์ˆ˜๋ฅผ ๋™์ ์œผ๋กœ ์กฐ์ •ํ•ฉ๋‹ˆ๋‹ค.
"""
num_lines = len(code_content.split('\n'))
return min(max(num_lines, min_lines), max_lines)
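# Examples of the clamping above: a 5-line file gets the 10-line minimum,
# a 300-line file is capped at the 100-line maximum, and a 42-line file
# gets exactly 42 display lines.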
# --------------------------------------------------
# Gemini 2.0 Flash Thinking model (LLM) helpers
# --------------------------------------------------
def format_chat_history(messages: List[ChatMessage]) -> List[Dict]:
"""
ChatMessage ๋ชฉ๋ก์„ Gemini ๋ชจ๋ธ์ด ์ดํ•ดํ•  ์ˆ˜ ์žˆ๋Š” ํ˜•์‹์œผ๋กœ ๋ณ€ํ™˜
(Thinking ๋ฉ”ํƒ€๋ฐ์ดํ„ฐ ํฌํ•จ ๋ฉ”์‹œ์ง€๋Š” ๋ฌด์‹œ)
"""
formatted = []
for m in messages:
# 'Thinking' metadata๊ฐ€ ์žˆ์œผ๋ฉด ๋ฌด์‹œ
if hasattr(m, "metadata") and m.metadata:
continue
role = "assistant" if m.role == "assistant" else "user"
formatted.append({"role": role, "parts": [m.content or ""]})
return formatted
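# Example (illustrative):
#   [ChatMessage(role="user", content="hi"),
#    ChatMessage(role="assistant", content="...", metadata={"title": "Thinking"})]
#   -> [{"role": "user", "parts": ["hi"]}]  (the metadata-tagged entry is skipped)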
def gemini_chat_completion(system_message: str, user_message: str, max_tokens: int = 200, temperature: float = 0.7) -> str:
    """
    Send the system and user messages to Gemini as a streaming request and
    return the final response text.
    """
    # Gemini has no dedicated system role in this helper, so the system
    # message is replayed as the first (user-role) history entry; the user
    # message itself is sent once via send_message rather than also being
    # duplicated in the history.
    chat_history = format_chat_history([ChatMessage(role="system", content=system_message)])
    chat = model.start_chat(history=chat_history)
    final = ""
    try:
        # Forward the previously unused max_tokens/temperature to the API
        config = genai.types.GenerationConfig(max_output_tokens=max_tokens, temperature=temperature)
        for chunk in chat.send_message(user_message, stream=True, generation_config=config):
            parts = chunk.candidates[0].content.parts
            if len(parts) == 2:
                # Thinking + final answer: keep only the answer
                final += parts[1].text
            else:
                final += parts[0].text
        return final.strip()
    except Exception as e:
        return f"Error during the LLM call: {str(e)}"
def summarize_code(app_content: str):
    system_msg = "You are an AI assistant that analyzes and summarizes Python code. Please summarize the given code concisely, in three lines or fewer."
    user_msg = f"Please summarize the following Python code in three lines or fewer:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=200, temperature=0.7)
    except Exception as e:
        return f"Error while generating the summary: {str(e)}"
def analyze_code(app_content: str):
    # Prepend the 'deep thinking' instructions to the system prompt
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code. Analyze the given code and, in terms of the service's usefulness and applications, explain the following items:\n"
        "A. Background and necessity\n"
        "B. Functional usefulness and value\n"
        "C. Distinctive strengths\n"
        "D. Intended audience and targets\n"
        "E. Expected impact\n"
        "Compare and analyze it against existing and similar projects. Output in Markdown format."
    )
    user_msg = f"Please analyze the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=1000, temperature=0.7)
    except Exception as e:
        return f"Error while generating the analysis: {str(e)}"
def explain_usage(app_content: str):
    # Prepend the 'deep thinking' instructions to the system prompt
    system_msg = (
        "You are a deep thinking AI. You may use extremely long chains of thought to deeply consider the problem "
        "and deliberate with yourself via systematic reasoning processes to help come to a correct solution prior to answering. "
        "You should enclose your thoughts and internal monologue inside tags, and then provide your solution or response to the problem. "
        "You are an AI assistant that analyzes Python code and explains how to use it. Based on the given code, describe the usage in detail, as if the reader were looking at the screen. Output in Markdown format."
    )
    user_msg = f"Please explain how to use the following Python code:\n\n{app_content}"
    try:
        return gemini_chat_completion(system_msg, user_msg, max_tokens=800, temperature=0.7)
    except Exception as e:
        return f"Error while generating the usage instructions: {str(e)}"
def stream_gemini_response(user_message: str, conversation_state: List[ChatMessage]) -> Iterator[List[ChatMessage]]:
    """
    conversation_state: the conversation history (a Gradio State) made up
    solely of ChatMessage objects.
    (Changed) Empty strings are handled gracefully instead of raising an error.
    """
    # The old `if not user_message.strip(): ... return` guard was removed
    # because the resulting "Please provide a non-empty text message..."
    # error was reported as annoying. user_submit_message clears the textbox
    # before this generator runs, so when the incoming message is blank we
    # fall back to the last user turn already stored in conversation_state.
    if not user_message.strip() and conversation_state and conversation_state[-1].role == "user":
        user_message = conversation_state[-1].content
    print(f"\n=== New Request ===\nUser message: {user_message if user_message.strip() else '(Empty)'}")
    # Convert the existing conversation to Gemini's format. The latest user
    # turn is excluded from the history because it is sent explicitly below;
    # replaying it in the history as well would duplicate it.
    if conversation_state and conversation_state[-1].role == "user":
        history_source = conversation_state[:-1]
    else:
        history_source = conversation_state
    chat_history = format_chat_history(history_source)
    chat = model.start_chat(history=chat_history)
    response = chat.send_message(user_message, stream=True)
    thought_buffer = ""
    response_buffer = ""
    thinking_complete = False
    # Placeholder entry for the 'Thinking' display
    conversation_state.append(
        ChatMessage(
            role="assistant",
            content="",
            metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
        )
    )
try:
for chunk in response:
parts = chunk.candidates[0].content.parts
current_chunk = parts[0].text
if len(parts) == 2 and not thinking_complete:
thought_buffer += current_chunk
print(f"\n=== Complete Thought ===\n{thought_buffer}")
conversation_state[-1] = ChatMessage(
role="assistant",
content=thought_buffer,
metadata={"title": "โš™๏ธ Thinking: *The thoughts produced by the model are experimental"}
)
yield conversation_state
response_buffer = parts[1].text
print(f"\n=== Starting Response ===\n{response_buffer}")
conversation_state.append(
ChatMessage(role="assistant", content=response_buffer)
)
thinking_complete = True
elif thinking_complete:
response_buffer += current_chunk
print(f"\n=== Response Chunk ===\n{current_chunk}")
conversation_state[-1] = ChatMessage(
role="assistant",
content=response_buffer
)
else:
thought_buffer += current_chunk
print(f"\n=== Thinking Chunk ===\n{current_chunk}")
conversation_state[-1] = ChatMessage(
role="assistant",
content=thought_buffer,
metadata={"title": "โš™๏ธ Thinking: *The thoughts produced by the model are experimental"}
)
yield conversation_state
print(f"\n=== Final Response ===\n{response_buffer}")
except Exception as e:
print(f"\n=== Error ===\n{str(e)}")
conversation_state.append(
ChatMessage(
role="assistant",
content=f"I apologize, but I encountered an error: {str(e)}"
)
)
yield conversation_state
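# Note on the streaming convention assumed above: with this experimental
# "thinking" model, candidates[0].content.parts holds a single part while a
# phase (thought or answer) is streaming, and exactly two parts on the chunk
# where the thought ends and the answer begins.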
def convert_to_display_tuples(messages: List[ChatMessage]) -> List[Tuple[str, str]]:
"""
ํ™”๋ฉด์— ํ‘œ์‹œํ•˜๊ธฐ ์œ„ํ•ด (user, assistant) ํŠœํ”Œ ๋ชฉ๋ก์œผ๋กœ ๋ณ€ํ™˜
"""
result = []
i = 0
while i < len(messages):
if messages[i].role == "user":
user_text = messages[i].content
assistant_text = ""
if i + 1 < len(messages) and messages[i+1].role == "assistant":
assistant_text = messages[i+1].content
i += 2
else:
i += 1
result.append((user_text, assistant_text))
else:
            # assistant message with no preceding user turn
result.append(("", messages[i].content))
i += 1
return result
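# Example (illustrative): user("hi"), assistant("hello"), assistant("more")
# -> [("hi", "hello"), ("", "more")]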
def user_submit_message(msg: str, conversation_state: List[ChatMessage]):
"""
์‚ฌ์šฉ์ž๊ฐ€ ๋ฉ”์‹œ์ง€๋ฅผ ์ž…๋ ฅํ•  ๋•Œ ํ˜ธ์ถœ.
ChatMessage ๋ฆฌ์ŠคํŠธ(conversation_state)์— user ๋ฉ”์‹œ์ง€๋ฅผ ์ถ”๊ฐ€ํ•œ ๋’ค ๋ฐ˜ํ™˜.
"""
conversation_state.append(ChatMessage(role="user", content=msg))
# ์ž…๋ ฅ์ฐฝ์€ ๋น„์›Œ์คŒ
return "", conversation_state
def respond_wrapper(message: str, conversation_state: List[ChatMessage], max_tokens, temperature, top_p):
"""
์œ ์ € ๋ฉ”์‹œ์ง€๋ฅผ ๋ฐ›์•„ Gemini์—๊ฒŒ ์š”์ฒญ(์ŠคํŠธ๋ฆฌ๋ฐ)ํ•˜๊ณ , ๋Œ€ํ™” ์ด๋ ฅ์„ ์—…๋ฐ์ดํŠธ ํ›„
ํ™”๋ฉด์—๋Š” (user, assistant) ํŠœํ”Œ์„ ๋ฐ˜ํ™˜ํ•œ๋‹ค.
"""
for updated_messages in stream_gemini_response(message, conversation_state):
# ํ™”๋ฉด ํ‘œ์‹œ์šฉ (user, assistant) ํŠœํ”Œ๋กœ ๋ณ€ํ™˜
yield "", convert_to_display_tuples(updated_messages)
def create_ui():
"""
Gradio UI๋ฅผ ๊ตฌ์„ฑํ•˜๋Š” ํ•จ์ˆ˜
"""
try:
css = """
footer {visibility: hidden;}
"""
with gr.Blocks(css=css) as demo:
gr.Markdown("# MOUSE: Space Research Thinking")
with gr.Tabs():
                with gr.TabItem("Analysis"):
                    with gr.Row():
                        with gr.Column():
                            url_input = gr.Textbox(label="HuggingFace Space URL")
                            analyze_button = gr.Button("Analyze")
                            summary_output = gr.Markdown(label="Summary")
                            analysis_output = gr.Markdown(label="Analysis")
                            usage_output = gr.Markdown(label="Usage")
                            tree_view_output = gr.Textbox(label="File Structure", lines=20)
with gr.Column():
code_tabs = gr.Tabs()
with code_tabs:
with gr.TabItem("app.py"):
app_py_content = gr.Code(
language="python",
label="app.py",
lines=50
)
with gr.TabItem("requirements.txt"):
requirements_content = gr.Textbox(
label="requirements.txt",
lines=50
)
                with gr.TabItem("AI Code Chat"):
                    gr.Markdown("## Enter an example or paste in source code, then ask your question")
                    # The Chatbot only handles display (it receives and renders tuples)
                    chatbot = gr.Chatbot(
                        label="Conversation",
                        height=400
                    )
                    msg = gr.Textbox(
                        label="Message",
                        placeholder="Enter your message..."
                    )
                    # Hidden parameters
max_tokens = gr.Slider(
minimum=1, maximum=8000,
value=4000, label="Max Tokens",
visible=False
)
temperature = gr.Slider(
minimum=0, maximum=1,
value=0.7, label="Temperature",
visible=False
)
top_p = gr.Slider(
minimum=0, maximum=1,
value=0.9, label="Top P",
visible=False
)
                    examples = [
                        ["Explain the usage in detail, in at least 4000 tokens"],
                        ["Write 20 FAQ entries, in at least 4000 tokens"],
                        ["Explain in at least 4000 tokens, focusing on technical differentiators and strengths"],
                        ["Write innovative ideas usable for a patent application, in at least 4000 tokens"],
                        ["Write in the form of an academic paper, in at least 4000 tokens"],
                        ["Continue your answer"]
                    ]
                    gr.Examples(examples, inputs=msg)
                    # The conversation state (chat history) holds only ChatMessage objects
                    conversation_state = gr.State([])
                    # Event chain:
                    # 1) user message -> user_submit_message -> (clear the input box + append to state)
                    # 2) respond_wrapper -> Gemini streaming -> update conversation state -> convert to (user, assistant) tuples
msg.submit(
user_submit_message,
inputs=[msg, conversation_state],
outputs=[msg, conversation_state],
queue=False
).then(
respond_wrapper,
inputs=[msg, conversation_state, max_tokens, temperature, top_p],
outputs=[msg, chatbot],
)
with gr.TabItem("Recommended Best"):
gr.Markdown(
"Discover recommended HuggingFace Spaces [here](https://huggingface.co/spaces/openfree/Korean-Leaderboard)."
)
            # Analyze-button wiring
space_id_state = gr.State()
tree_structure_state = gr.State()
app_py_content_lines = gr.State()
analyze_button.click(
analyze_space,
inputs=[url_input],
outputs=[
app_py_content,
tree_view_output,
tree_structure_state,
space_id_state,
summary_output,
analysis_output,
usage_output,
app_py_content_lines
]
).then(
lambda space_id: get_file_content(space_id, "requirements.txt"),
inputs=[space_id_state],
outputs=[requirements_content]
).then(
lambda lines: gr.update(lines=lines),
inputs=[app_py_content_lines],
outputs=[app_py_content]
)
return demo
except Exception as e:
print(f"Error in create_ui: {str(e)}")
print(traceback.format_exc())
raise
if __name__ == "__main__":
try:
print("Starting HuggingFace Space Analyzer...")
demo = create_ui()
print("UI created successfully.")
print("Configuring Gradio queue...")
demo.queue()
print("Gradio queue configured.")
print("Launching Gradio app...")
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
debug=True,
show_api=False
)
print("Gradio app launched successfully.")
except Exception as e:
print(f"Error in main: {str(e)}")
print("Detailed error information:")
print(traceback.format_exc())
raise