shamik
committed on
feat: adding the gradio app.
app.py
ADDED
@@ -0,0 +1,77 @@
import asyncio

import gradio as gr
import nest_asyncio
from huggingface_hub import login

from src.agent_hackathon.consts import PROJECT_ROOT_DIR
from src.agent_hackathon.logger import get_logger
from src.agent_hackathon.multiagent import MultiAgentWorkflow

# Allow asyncio.run() to be called from within Gradio's already-running event loop.
nest_asyncio.apply()

logger = get_logger(log_name="multiagent", log_dir=PROJECT_ROOT_DIR / "logs")

PRIMARY_HEADING = """# ML Topics Deep Research"""
SECONDARY_HEADING = """### This multi-agent framework searches the web for relevant events, queries a DB of arXiv ML research papers (Jan 2020 - Jun 6th 2025, select categories), and finds relevant content across different websites to answer the user's query.

For more details on the filtered arXiv dataset, refer [here](https://huggingface.co/datasets/Shamik/arxiv_cs_2020_07_2025)
"""
workflow = MultiAgentWorkflow()

# Log in to the Hugging Face Hub only once per session.
_login_done = False


def run(
    query: str, api_key: str, chat_history: list[dict[str, str | None]]
) -> tuple[str, list[dict[str, str | None]]] | None:
    """Run the multi-agent workflow on the query and append the exchange to the chat history."""
    global _login_done
    if not api_key or not api_key.startswith("hf"):
        raise ValueError("Incorrect HuggingFace Inference API Key")
    if not _login_done:
        login(token=api_key)
        _login_done = True
    try:
        result = asyncio.run(workflow.run(user_query=query))
        chat_history.append({"role": "user", "content": query})
        chat_history.append({"role": "assistant", "content": result})
        return "", chat_history
    except Exception as err:
        logger.error(f"Error during workflow execution: {err}")
        return None


with gr.Blocks(fill_height=True) as demo:
    gr.Markdown(value=PRIMARY_HEADING)
    gr.Markdown(value=SECONDARY_HEADING)
    gr.Markdown(
        value="""<span style="color:red"> Please use a 🤗 Inference API Key </span>"""
    )
    api_key = gr.Textbox(
        placeholder="Enter your HuggingFace Inference API key here",
        label="🤗 Inference API Key",
        show_label=True,
        type="password",
    )
    chatbot = gr.Chatbot(
        type="messages",
        label="DeepResearch",
        show_label=True,
        height=500,
        show_copy_all_button=True,
        show_copy_button=True,
    )
    msg = gr.Textbox(
        placeholder="Type your message here and press enter...",
        show_label=True,
        label="Input",
        submit_btn=True,
        stop_btn=True,
    )
    clear = gr.ClearButton(components=[msg, chatbot])
    msg.submit(fn=run, inputs=[msg, api_key, chatbot], outputs=[msg, chatbot])


if __name__ == "__main__":
    demo.queue(max_size=1).launch(share=False)


# Example queries:
# tell me about reinforcement learning in robotics
# give me event details on reinforcement learning & robotics