# Deep_Research_Assistant / deep_research.py
import gradio as gr
from dotenv import load_dotenv
from research_manager import ResearchManager
from agents import Runner, trace, gen_trace_id
import os
# Import spaces for ZeroGPU compatibility
try:
import spaces
# Create a dummy GPU function to satisfy ZeroGPU requirements
@spaces.GPU
def dummy_gpu_function():
"""Dummy function to satisfy ZeroGPU requirements"""
return "GPU initialized"
# Call it once during import to satisfy the requirement
dummy_gpu_function()
except ImportError:
# Fallback for local development
class spaces:
@staticmethod
def GPU(func):
return func
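# Illustrative sketch (not part of this app's runtime path): on a real ZeroGPU Space,
# GPU-bound work is normally wrapped with the decorator, optionally with a duration
# hint as described in the ZeroGPU docs, e.g.:
#
#     @spaces.GPU(duration=60)  # duration in seconds; assumed from the ZeroGPU docs
#     def run_on_gpu(prompt: str) -> str:
#         ...  # GPU-backed inference would go here
#
# This module has no GPU workload of its own, so only the dummy call above is needed.
# Note that the local fallback stub above supports only the bare @spaces.GPU form.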
load_dotenv(override=True)
# Available models for user selection
AVAILABLE_MODELS = [
"gpt-4o",
"gpt-4o-mini",
"gpt-4-turbo",
"gpt-4",
"gpt-3.5-turbo",
"o1-preview",
"o1-mini"
]
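# These model names are offered verbatim in the Model Selection dropdown below;
# "gpt-4o-mini" is both the dropdown default and the fallback used when the stored
# state has no model (see handle_research_with_answers and
# run_clarified_research_with_progress).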
async def handle_query_submission(query: str, current_state: dict, api_key: str, model: str):
"""Handle initial query submission - generate clarifying questions with progress"""
if not query.strip():
return "Please enter a research query.", gr.update(visible=False), gr.update(visible=False), current_state
if not api_key.strip():
return "Please provide your OpenAI API key.", gr.update(visible=False), gr.update(visible=False), current_state
try:
        # Note: this handler returns a single response, so no progress is streamed here;
        # live progress updates are provided by the *_with_progress handlers below.
research_manager = ResearchManager(api_key=api_key, model=model)
result = await research_manager.run_with_clarification(query)
# Format questions for display
questions_text = "\n\n".join([f"**{i+1}.** {q}" for i, q in enumerate(result["questions"])])
display_text = f"**βœ… Clarifying Questions Generated:**\n\n{questions_text}\n\n**Please answer these questions to help focus the research:**"
# Update state with query and questions
new_state = {
"query": query,
"questions": result["questions"],
"trace_id": result["trace_id"],
"api_key": api_key,
"model": model
}
return display_text, gr.update(visible=True), gr.update(visible=True), new_state
except Exception as e:
return f"❌ Error generating clarifying questions: {str(e)}", gr.update(visible=False), gr.update(visible=False), current_state
async def handle_research_with_answers(answers: str, current_state: dict, email_address: str, send_email: bool):
"""Handle research execution with clarification answers with progress updates"""
if not current_state.get("query"):
return "Please start by entering a research query first.", current_state
if not answers.strip():
return "Please provide answers to the clarifying questions.", current_state
api_key = current_state.get("api_key", "")
model = current_state.get("model", "gpt-4o-mini")
if not api_key:
return "API key missing. Please restart with your API key.", current_state
try:
        # Note: progress_message below is assembled but this handler returns only the
        # final report; live progress streaming is handled by the *_with_progress variants.
progress_message = f"""πŸ”„ **Research in Progress...**
**Original Query:** {current_state['query']}
**Status:** Processing your clarifications and starting comprehensive research...
⏳ This may take 1-2 minutes. We're:
1. Planning search strategy
2. Conducting multiple web searches
3. Writing initial report
4. Evaluating quality
5. Optimizing if needed
6. Preparing final delivery"""
# Use the enhanced manager with email settings
from research_manager import create_custom_research_agent
# Parse answers (one per line)
answer_list = [line.strip() for line in answers.split('\n') if line.strip()]
# Format the query with clarifications
clarified_query = f"""Original query: {current_state['query']}
Clarifications provided:
{chr(10).join([f"{i+1}. {answer}" for i, answer in enumerate(answer_list)])}
Please use these clarifications to focus and refine the research approach."""
# Create custom agent with email settings and API configuration
custom_agent = create_custom_research_agent(
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
)
# Run research with custom agent
trace_id = gen_trace_id()
with trace("Focused Research with Clarifications", trace_id=trace_id):
result = await Runner.run(
custom_agent,
f"Research Query: {clarified_query}"
)
email_status = ""
if send_email and email_address:
email_status = f"\nπŸ“§ **Email sent to:** {email_address}"
elif send_email and not email_address:
email_status = f"\n⚠️ **Email not sent:** No email address provided"
else:
email_status = f"\nπŸ“„ **Report generated:** Email sending disabled"
final_report = f"""**βœ… Research Complete!**
**πŸ”— Trace ID:** {trace_id}
**πŸ€– Model Used:** {model}
**Original Query:** {current_state['query']}
**πŸ“Š Enhanced Final Report:**
{result.final_output}
{email_status}
---
*Research completed using enhanced AI system with quality assurance and your clarifications.*"""
return final_report, current_state
except Exception as e:
return f"❌ Error during research: {str(e)}", current_state
async def run_direct_research(query: str, api_key: str, model: str, email_address: str = "", send_email: bool = False):
"""Run research directly without clarification using the new agent-based system"""
if not query.strip():
return "Please enter a research query."
if not api_key.strip():
return "Please provide your OpenAI API key."
try:
trace_id = gen_trace_id()
with trace("Enhanced Research Manager", trace_id=trace_id):
print(f"πŸ”— Starting enhanced research with trace: {trace_id}")
# Import the function here to avoid circular imports
from research_manager import create_custom_research_agent
# Create agent with email settings and API configuration
custom_agent = create_custom_research_agent(
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
)
# Use the custom agent
result = await Runner.run(
custom_agent,
f"Research Query: {query}"
)
email_status = ""
if send_email and email_address:
email_status = f"\nπŸ“§ **Email sent to:** {email_address}"
elif send_email and not email_address:
email_status = f"\n⚠️ **Email not sent:** No email address provided"
else:
email_status = f"\nπŸ“„ **Report generated:** Email sending disabled"
return f"""**βœ… Research Complete!**
**πŸ”— Trace ID:** {trace_id}
**πŸ€– Model Used:** {model}
**πŸ‘€ View Detailed Trace:** https://platform.openai.com/traces/trace?trace_id={trace_id}
**πŸ“Š Enhanced Research Report with Quality Assurance:**
{result.final_output}
{email_status}
---
*πŸ€– This research was conducted using our enhanced agent-based system with automatic quality evaluation and optimization. Check the trace link above to see the full workflow including planning, searching, writing, evaluation, and optimization steps.*"""
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"Error details: {error_details}")
return f"❌ Error during research: {str(e)}\n\nPlease check your API key and model selection, or try the Legacy Quick Research option if this persists."
async def run_legacy_research(query: str, api_key: str, model: str, email_address: str, send_email: bool):
"""Run research using the original ResearchManager class with email options"""
if not query.strip():
return "Please enter a research query."
if not api_key.strip():
return "Please provide your OpenAI API key."
try:
# Use the enhanced system but call it "legacy" for the user
trace_id = gen_trace_id()
with trace("Quick Research", trace_id=trace_id):
from research_manager import create_custom_research_agent
# Create agent with email settings and API configuration
custom_agent = create_custom_research_agent(
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
)
result = await Runner.run(
custom_agent,
f"Research Query: {query}"
)
email_status = ""
if send_email and email_address:
email_status = f"\nπŸ“§ **Email sent to:** {email_address}"
elif send_email and not email_address:
email_status = f"\n⚠️ **Email not sent:** No email address provided"
else:
email_status = f"\nπŸ“„ **Report generated:** Email sending disabled"
return f"""**βœ… Quick Research Complete!**
**πŸ”— Trace ID:** {trace_id}
**πŸ€– Model Used:** {model}
**πŸ“Š Research Report:**
{result.final_output}
{email_status}
---
*Research completed using streamlined research system.*"""
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"Error details: {error_details}")
return f"❌ Error during research: {str(e)}\n\nPlease check your API key and model selection."
async def run_direct_research_with_progress(query: str, api_key: str, model: str, email_address: str = "", send_email: bool = False):
"""Run direct research with real-time progress tracking"""
if not query.strip():
yield "Please enter a research query."
return
if not api_key.strip():
yield "Please provide your OpenAI API key."
return
try:
# Import here to avoid circular imports
from research_manager import run_research_with_progress
# Use the progress-enabled research function
async for progress_update in run_research_with_progress(
query=query,
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
):
yield progress_update
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"Error details: {error_details}")
yield f"❌ Error during research: {str(e)}\n\nPlease check your API key and model selection."
async def run_enhanced_research_with_progress(query: str, api_key: str, model: str, email_address: str = "", send_email: bool = False):
"""Run enhanced research with real-time progress tracking"""
if not query.strip():
yield "Please enter a research query."
return
if not api_key.strip():
yield "Please provide your OpenAI API key."
return
try:
# Import here to avoid circular imports
from research_manager import run_research_with_progress
# Use the progress-enabled research function
async for progress_update in run_research_with_progress(
query=query,
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
):
yield progress_update
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"Error details: {error_details}")
yield f"❌ Error during enhanced research: {str(e)}\n\nPlease check your API key and model selection."
async def run_clarified_research_with_progress(answers: str, current_state: dict, email_address: str, send_email: bool):
"""Run research with clarification answers and real-time progress tracking"""
if not current_state.get("query"):
yield "Please start by entering a research query first."
return
if not answers.strip():
yield "Please provide answers to the clarifying questions."
return
api_key = current_state.get("api_key", "")
model = current_state.get("model", "gpt-4o-mini")
if not api_key:
yield "API key missing. Please restart with your API key."
return
try:
# Parse answers (one per line)
answer_list = [line.strip() for line in answers.split('\n') if line.strip()]
# Format the query with clarifications
clarified_query = f"""Original query: {current_state['query']}
Clarifications provided:
{chr(10).join([f"{i+1}. {answer}" for i, answer in enumerate(answer_list)])}
Please use these clarifications to focus and refine the research approach."""
# Import here to avoid circular imports
from research_manager import run_research_with_progress
# Use the progress-enabled research function with clarified query
async for progress_update in run_research_with_progress(
query=clarified_query,
email_address=email_address if send_email else None,
send_email=send_email,
api_key=api_key,
model=model
):
yield progress_update
except Exception as e:
yield f"❌ Error during research: {str(e)}"
# Custom CSS for better readability and contrast
custom_css = """
/* Main container improvements */
.gradio-container {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}
/* Light theme styles for text inputs */
.gradio-container input[type="text"],
.gradio-container textarea {
background-color: #ffffff !important;
border: 2px solid #d1d5db !important;
border-radius: 8px !important;
padding: 12px !important;
font-size: 14px !important;
color: #374151 !important;
font-weight: 400 !important;
line-height: 1.5 !important;
transition: border-color 0.2s ease !important;
}
.gradio-container input[type="text"]:focus,
.gradio-container textarea:focus {
border-color: #60a5fa !important;
box-shadow: 0 0 0 3px rgba(96, 165, 250, 0.2) !important;
outline: none !important;
}
/* Placeholder styling for all inputs */
.gradio-container input[type="text"]::placeholder,
.gradio-container textarea::placeholder {
color: #6b7280 !important;
opacity: 0.8 !important;
font-style: italic !important;
}
/* Dark theme styles for text inputs - Multiple selectors for compatibility */
.gradio-container.dark input[type="text"],
.gradio-container.dark textarea,
[data-testid="container"].dark input[type="text"],
[data-testid="container"].dark textarea,
.dark input[type="text"],
.dark textarea {
background-color: #4b5563 !important;
border: 2px solid #6b7280 !important;
color: #f9fafb !important;
}
.gradio-container.dark input[type="text"]::placeholder,
.gradio-container.dark textarea::placeholder,
[data-testid="container"].dark input[type="text"]::placeholder,
[data-testid="container"].dark textarea::placeholder,
.dark input[type="text"]::placeholder,
.dark textarea::placeholder {
color: #9ca3af !important;
}
/* Additional dark theme detection via media query */
@media (prefers-color-scheme: dark) {
.gradio-container input[type="text"],
.gradio-container textarea {
background-color: #4b5563 !important;
border: 2px solid #6b7280 !important;
color: #f9fafb !important;
}
.gradio-container input[type="text"]::placeholder,
.gradio-container textarea::placeholder {
color: #9ca3af !important;
}
}
/* Simple button styling with good contrast */
.gradio-container button {
border-radius: 8px !important;
font-weight: 500 !important;
font-size: 14px !important;
padding: 8px 16px !important;
border: 2px solid transparent !important;
transition: all 0.2s ease !important;
}
/* Primary buttons */
button[variant="primary"] {
background-color: #3b82f6 !important;
color: white !important;
border-color: #3b82f6 !important;
}
button[variant="primary"]:hover {
background-color: #2563eb !important;
border-color: #2563eb !important;
}
/* Secondary buttons */
button[variant="secondary"] {
background-color: #f8fafc !important;
color: #374151 !important;
border-color: #d1d5db !important;
}
button[variant="secondary"]:hover {
background-color: #f1f5f9 !important;
border-color: #9ca3af !important;
}
/* Theme-adaptive section styling - Light theme */
.clarification-section {
border: 2px solid #e5e7eb !important;
border-radius: 12px !important;
padding: 20px !important;
margin: 16px 0 !important;
background-color: #ffffff !important;
color: #374151 !important;
}
.clarification-section * {
color: #374151 !important;
}
.clarification-section h1,
.clarification-section h2,
.clarification-section h3 {
color: #374151 !important;
font-weight: 600 !important;
}
/* Dark theme specific styles for clarification section - Multiple selectors */
.gradio-container.dark .clarification-section,
[data-testid="container"].dark .clarification-section,
.dark .clarification-section {
background-color: #374151 !important;
border-color: #4b5563 !important;
color: #ffffff !important;
}
.gradio-container.dark .clarification-section *,
[data-testid="container"].dark .clarification-section *,
.dark .clarification-section * {
color: #ffffff !important;
}
.gradio-container.dark .clarification-section h1,
.gradio-container.dark .clarification-section h2,
.gradio-container.dark .clarification-section h3,
[data-testid="container"].dark .clarification-section h1,
[data-testid="container"].dark .clarification-section h2,
[data-testid="container"].dark .clarification-section h3,
.dark .clarification-section h1,
.dark .clarification-section h2,
.dark .clarification-section h3 {
color: #ffffff !important;
}
/* Additional dark theme detection for clarification section via media query */
@media (prefers-color-scheme: dark) {
.clarification-section {
background-color: #374151 !important;
border-color: #4b5563 !important;
color: #ffffff !important;
}
.clarification-section * {
color: #ffffff !important;
}
}
/* Clean answer box - Light theme */
.answer-textbox {
background-color: #ffffff !important;
border: 2px solid #d1d5db !important;
border-radius: 8px !important;
padding: 12px !important;
color: #374151 !important;
line-height: 1.5 !important;
}
.answer-textbox:focus {
border-color: #60a5fa !important;
box-shadow: 0 0 0 3px rgba(96, 165, 250, 0.2) !important;
}
/* Target the actual textarea element inside answer-textbox */
.answer-textbox textarea {
background-color: #ffffff !important;
color: #374151 !important;
border: 2px solid #d1d5db !important;
border-radius: 8px !important;
padding: 12px !important;
font-size: 14px !important;
font-weight: 400 !important;
line-height: 1.5 !important;
}
.answer-textbox textarea:focus {
border-color: #60a5fa !important;
box-shadow: 0 0 0 3px rgba(96, 165, 250, 0.2) !important;
}
/* Light theme placeholder text */
.answer-textbox textarea::placeholder {
color: #6b7280 !important;
opacity: 0.8 !important;
font-style: italic !important;
}
/* Make all textareas have proper text color for light theme */
.gradio-container textarea {
color: #374151 !important;
}
.answer-textbox::placeholder {
color: #6b7280 !important;
opacity: 0.9 !important;
}
/* Dark theme styles for answer box - Multiple selectors for compatibility */
.gradio-container.dark .answer-textbox,
[data-testid="container"].dark .answer-textbox,
.dark .answer-textbox {
background-color: #4b5563 !important;
border: 2px solid #6b7280 !important;
color: #d1d5db !important;
}
.gradio-container.dark .answer-textbox textarea,
[data-testid="container"].dark .answer-textbox textarea,
.dark .answer-textbox textarea {
background-color: #4b5563 !important;
color: #f9fafb !important;
border: 2px solid #6b7280 !important;
}
.gradio-container.dark .answer-textbox textarea::placeholder,
[data-testid="container"].dark .answer-textbox textarea::placeholder,
.dark .answer-textbox textarea::placeholder {
color: #9ca3af !important;
}
.gradio-container.dark textarea,
[data-testid="container"].dark textarea,
.dark textarea {
color: #f9fafb !important;
}
.gradio-container.dark .answer-textbox::placeholder,
[data-testid="container"].dark .answer-textbox::placeholder,
.dark .answer-textbox::placeholder {
color: #9ca3af !important;
}
/* Additional dark theme detection for answer box via media query */
@media (prefers-color-scheme: dark) {
.answer-textbox {
background-color: #4b5563 !important;
border: 2px solid #6b7280 !important;
color: #d1d5db !important;
}
.answer-textbox textarea {
background-color: #4b5563 !important;
color: #f9fafb !important;
border: 2px solid #6b7280 !important;
}
.answer-textbox textarea::placeholder {
color: #9ca3af !important;
}
.gradio-container textarea {
color: #f9fafb !important;
}
}
/* Theme-adaptive results display - Light theme */
.results-display {
border: 2px solid #e5e7eb !important;
border-radius: 8px !important;
padding: 16px !important;
margin: 12px 0 !important;
line-height: 1.6 !important;
background-color: #ffffff !important;
color: #374151 !important;
}
/* Make sure markdown in results display uses proper colors for light theme */
.results-display * {
color: #374151 !important;
}
/* Dark theme specific styles for results display - Multiple selectors */
.gradio-container.dark .results-display,
[data-testid="container"].dark .results-display,
.dark .results-display {
background-color: #374151 !important;
border-color: #4b5563 !important;
color: #ffffff !important;
}
.gradio-container.dark .results-display *,
[data-testid="container"].dark .results-display *,
.dark .results-display * {
color: #ffffff !important;
}
/* Additional dark theme detection for results display via media query */
@media (prefers-color-scheme: dark) {
.results-display {
background-color: #374151 !important;
border-color: #4b5563 !important;
color: #ffffff !important;
}
.results-display * {
color: #ffffff !important;
}
}
/* Style links in results display for visibility */
.results-display a {
color: #60a5fa !important;
text-decoration: underline !important;
}
.results-display a:hover {
color: #93c5fd !important;
}
/* Accordion improvements */
.gradio-accordion {
border: 1px solid #e5e7eb !important;
border-radius: 8px !important;
margin: 8px 0 !important;
}
/* Status indicators with good contrast */
.status-success {
color: #059669 !important;
font-weight: 500 !important;
}
.status-info {
color: #0369a1 !important;
font-weight: 500 !important;
}
.status-warning {
color: #d97706 !important;
font-weight: 500 !important;
}
/* Theme-adaptive headers */
h1, h2, h3 {
color: var(--body-text-color) !important;
font-weight: 600 !important;
}
/* Fallback for when CSS variables aren't available */
@media (prefers-color-scheme: dark) {
h1, h2, h3 {
color: #ffffff !important;
}
}
@media (prefers-color-scheme: light) {
h1, h2, h3 {
color: #1f2937 !important;
}
}
/* Specific overrides for Gradio themes */
.gradio-container.dark h1,
.gradio-container.dark h2,
.gradio-container.dark h3 {
color: #ffffff !important;
}
.gradio-container.light h1,
.gradio-container.light h2,
.gradio-container.light h3 {
color: #1f2937 !important;
}
/* Remove unnecessary gradients and shadows for simplicity */
* {
box-shadow: none !important;
}
/* Keep only essential shadows for depth */
.gradio-container button,
.gradio-container input,
.gradio-container textarea {
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1) !important;
}
.gradio-container button:hover {
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.15) !important;
}
"""
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), css=custom_css) as ui:
gr.Markdown("# πŸ” Deep Research Assistant")
gr.Markdown("**Ask a research question and get comprehensive, AI-powered analysis with quality assurance.**")
# State to track the conversation
state = gr.State({})
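    # Shared conversation state: a plain dict that carries query, questions, trace_id,
    # api_key, and model from the clarification step to the research handlers.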
# Main Research Configuration Block
with gr.Column():
# API Configuration Section
gr.Markdown("### πŸ”‘ API Configuration")
with gr.Row():
with gr.Column(scale=2):
api_key_textbox = gr.Textbox(
label="OpenAI API Key",
placeholder="sk-...",
type="password",
lines=1,
info="Your OpenAI API key (required to avoid rate limits)"
)
with gr.Column(scale=1):
model_textbox = gr.Dropdown(
label="Model Selection",
choices=AVAILABLE_MODELS,
value="gpt-4o-mini",
info="Choose your preferred OpenAI model"
)
gr.Markdown("### πŸ” Research Query")
query_textbox = gr.Textbox(
label="Research Query",
placeholder="What would you like to research? (e.g., 'Latest developments in renewable energy')",
lines=2,
elem_classes=["main-input"]
)
# Email Configuration (part of main block)
with gr.Accordion("πŸ“§ Email Configuration (Optional)", open=False):
gr.Markdown("**Configure email delivery for your research reports**")
with gr.Row():
with gr.Column(scale=3):
email_textbox = gr.Textbox(
label="Email Address",
placeholder="your.email@example.com",
lines=1
)
with gr.Column(scale=1):
send_email_checkbox = gr.Checkbox(
label="Send Email",
value=False,
info="Check to receive the report via email"
)
gr.Markdown("*This email setting will be used for any research option you choose below.*")
# Start Research Button (below the main configuration)
submit_button = gr.Button("πŸš€ Start Research", variant="primary", size="lg")
# Output area for questions and results
output_area = gr.Markdown(
label="Research Progress",
elem_classes=["results-display"],
value="πŸ‘‹ Enter your research query above and configure email settings if desired, then click Start Research!"
)
# Clarification answers section (initially hidden)
with gr.Column(visible=False, elem_classes=["clarification-section"]) as clarification_row:
gr.Markdown("### πŸ’­ Help us focus your research")
gr.Markdown("Please answer these questions to get more targeted results:")
answers_textbox = gr.Textbox(
label="Your Answers",
placeholder="Answer each question on a separate line...\n\nExample:\n1. I'm interested in solar and wind technologies\n2. I need technical details and market analysis\n3. This is for a business presentation",
lines=6,
elem_classes=["answer-textbox"],
show_label=True
)
research_button = gr.Button(
"πŸ” Run Focused Research",
variant="primary",
visible=False,
size="lg"
)
# Research options
with gr.Accordion("πŸ€– Enhanced Research (Recommended)", open=False):
gr.Markdown("""
**New AI-powered research system featuring:**
βœ… **Quality Evaluation** - Each report is automatically assessed
βœ… **Smart Optimization** - Reports are improved if needed
βœ… **Comprehensive Analysis** - Multiple search strategies
*Delivers higher quality research through AI quality assurance.*
""")
enhanced_button = gr.Button("πŸ€– Enhanced Research", variant="primary")
with gr.Accordion("⚑ Quick Research (Legacy)", open=False):
gr.Markdown("*Faster research using the original system - good for quick queries.*")
direct_button = gr.Button("⚑ Quick Research", variant="secondary")
# Event handlers
submit_button.click(
fn=handle_query_submission,
inputs=[query_textbox, state, api_key_textbox, model_textbox],
outputs=[output_area, clarification_row, research_button, state]
)
query_textbox.submit(
fn=handle_query_submission,
inputs=[query_textbox, state, api_key_textbox, model_textbox],
outputs=[output_area, clarification_row, research_button, state]
)
research_button.click(
fn=run_clarified_research_with_progress,
inputs=[answers_textbox, state, email_textbox, send_email_checkbox],
outputs=[output_area],
show_progress=True
)
answers_textbox.submit(
fn=run_clarified_research_with_progress,
inputs=[answers_textbox, state, email_textbox, send_email_checkbox],
outputs=[output_area],
show_progress=True
)
enhanced_button.click(
fn=run_enhanced_research_with_progress,
inputs=[query_textbox, api_key_textbox, model_textbox, email_textbox, send_email_checkbox],
outputs=[output_area],
show_progress=True
)
direct_button.click(
fn=run_direct_research_with_progress,
inputs=[query_textbox, api_key_textbox, model_textbox, email_textbox, send_email_checkbox],
outputs=[output_area],
show_progress=True
)
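    # The *_with_progress handlers bound above are async generators, so each yielded
    # Markdown string is streamed into output_area as a live progress update.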
if __name__ == "__main__":
ui.launch(inbrowser=True)
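# When hosted on a platform such as a Hugging Face Space, the runner supplies its own
# server settings; a common local variant (illustrative, using standard Gradio launch
# parameters) is:
#
#     ui.launch(inbrowser=True, server_name="127.0.0.1", server_port=7860)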