# chatui-helper / app.py
# Author: milwright — commit d68f6d0
# "update gradio to 5.42.0 and fix export conversation functionality"
"""
HuggingFace Space Generator - Refactored with Gradio 5.x Best Practices
Creates customizable AI chat interfaces for deployment on HuggingFace Spaces
"""
import gradio as gr
import json
import zipfile
import io
import os
import requests
import re
import tempfile
from datetime import datetime
from dotenv import load_dotenv
from pathlib import Path
# Import our shared utilities
from utils import (
get_theme, fetch_url_content, create_safe_filename,
export_conversation_to_markdown, process_file_upload,
ConfigurationManager, get_model_choices, AVAILABLE_THEMES,
extract_urls_from_text
)
# Load environment variables (e.g. API keys) from a local .env file
load_dotenv()
# Load templates — the generated Space's app.py is produced from this template.
# Failure is non-fatal so the UI can still start; get_template() will raise
# later if the package is actually generated without it.
try:
    from space_template import get_template, validate_template
    print("Loaded space template")
except Exception as e:
    print(f"Could not load space_template.py: {e}")
    # Fallback template will be defined if needed
# Load academic templates if available; fall back to an empty mapping so the
# template dropdown simply shows "None (Custom)" only.
try:
    with open('academic_templates.json', 'r') as f:
        ACADEMIC_TEMPLATES = json.load(f)
    print(f"Loaded {len(ACADEMIC_TEMPLATES)} academic templates")
except Exception as e:
    print(f"Could not load academic templates: {e}")
    ACADEMIC_TEMPLATES = {}
class SpaceGenerator:
"""Main application class for generating HuggingFace Spaces"""
def __init__(self):
    """Initialize default configuration and per-instance caches."""
    starter_examples = [
        "Hello! How can you help me?",
        "Tell me something interesting",
        "What can you do?"
    ]
    # Baseline values used when no template/upload overrides them
    self.default_config = {
        'name': 'AI Assistant',
        'tagline': 'A customizable AI assistant',
        'description': 'A versatile AI assistant powered by advanced language models. Configure it to meet your specific needs with custom prompts, examples, and grounding URLs.',
        'system_prompt': 'You are a helpful AI assistant.',
        'model': 'google/gemini-2.0-flash-001',
        'api_key_var': 'API_KEY',
        'temperature': 0.7,
        'max_tokens': 750,
        'theme': 'Default',
        'grounding_urls': [],
        'enable_dynamic_urls': True,
        'enable_file_upload': True,
        'examples': starter_examples,
    }
    self.config_manager = ConfigurationManager(self.default_config)
    # Most recently applied configuration
    self.current_config = {}
    # Cache of fetched URL contents
    self.url_content_cache = {}
    # Cache for custom values when switching templates
    self.custom_values_cache = {}
def create_interface(self) -> gr.Blocks:
    """Create and return the main Gradio Blocks interface.

    Shared gr.State objects are created before the tabs so the tab
    builders (which reference them) can wire events against them.
    """
    theme = get_theme("Default")  # Using Default theme for the generator itself
    with gr.Blocks(title="ChatUI Helper", theme=theme) as demo:
        # Header
        gr.Markdown("# ChatUI Helper")
        gr.Markdown("Create customizable AI chat interfaces for deployment on HuggingFace Spaces")
        # Shared state - create these first so they can be referenced in tabs
        self.config_state = gr.State({})
        self.preview_chat_history = gr.State([])
        self.previous_template = gr.State("None (Custom)")
        self.template_cache = gr.State({})
        # Main tabs
        with gr.Tabs() as main_tabs:
            self.main_tabs = main_tabs  # Store reference for tab switching
            # Configuration Tab
            with gr.Tab("Configuration"):
                self._create_configuration_tab()
            # Preview Tab
            with gr.Tab("Preview"):
                self._create_preview_tab()
            # Documentation Tab
            with gr.Tab("Documentation"):
                self._create_documentation_tab()
    return demo
def _create_configuration_tab(self):
    """Build the Configuration tab.

    Creates all form inputs (stored on ``self`` so other tabs and
    handlers can reference them) and wires the template, upload,
    URL/example visibility, preview and generate event handlers.
    The ordering of the inputs/outputs lists below must match the
    positional contracts of ``_apply_template``,
    ``_apply_uploaded_config``, ``_preview_configuration`` and
    ``_generate_package``.
    """
    with gr.Column():
        # Template Selection
        with gr.Group():
            gr.Markdown("### 📝 Quick Start Templates")
            template_selector = gr.Dropdown(
                label="Select Template",
                choices=["None (Custom)"] + list(ACADEMIC_TEMPLATES.keys()),
                value="None (Custom)",
                interactive=True
            )
        # Space Identity
        with gr.Group():
            gr.Markdown("### 🎯 Space Identity")
            with gr.Row():
                self.name_input = gr.Textbox(
                    label="Assistant Name",
                    placeholder="My AI Assistant",
                    value="AI Assistant"
                )
                self.theme_input = gr.Dropdown(
                    label="Theme",
                    choices=list(AVAILABLE_THEMES.keys()),
                    value="Default"
                )
            self.tagline_input = gr.Textbox(
                label="Tagline",
                placeholder="Brief tagline for HuggingFace...",
                max_length=60,
                info="Maximum 60 characters (for YAML frontmatter)"
            )
            self.description_input = gr.Textbox(
                label="Description",
                placeholder="A detailed description of your AI assistant. You can use markdown formatting here...",
                lines=4,
                info="Full markdown description for the README"
            )
        # System Configuration
        with gr.Group():
            gr.Markdown("### ⚙️ System Configuration")
            self.system_prompt_input = gr.Textbox(
                label="System Prompt",
                placeholder="You are a helpful AI assistant...",
                lines=5,
                info="The system prompt that guides the AI's behavior and responses."
            )
            self.model_input = gr.Dropdown(
                label="Model",
                choices=get_model_choices(),
                value="google/gemini-2.0-flash-001"
            )
            self.language_input = gr.Dropdown(
                label="Language",
                choices=[
                    "Arabic",
                    "Bengali",
                    "English",
                    "French",
                    "German",
                    "Hindi",
                    "Italian",
                    "Japanese",
                    "Korean",
                    "Mandarin",
                    "Portuguese",
                    "Russian",
                    "Spanish",
                    "Turkish"
                ],
                value="English"
            )
            with gr.Row():
                self.temperature_input = gr.Slider(
                    label="Temperature",
                    minimum=0,
                    maximum=2,
                    value=0.7,
                    step=0.1
                )
                self.max_tokens_input = gr.Slider(
                    label="Max Tokens",
                    minimum=50,
                    maximum=4096,
                    value=750,
                    step=50
                )
        # Example Prompts
        with gr.Group():
            gr.Markdown("### 💡 Example Prompts")
            gr.Markdown("Provide 3-5 sample prompts that showcase your assistant's capabilities")
            # Create individual example input fields (all 5 exist; the
            # add/remove buttons only toggle visibility)
            self.example_inputs = []
            for i in range(5):
                example_input = gr.Textbox(
                    label=f"Example {i+1}",
                    placeholder=f"Sample prompt {i+1}...",
                    visible=(i < 3)  # Show first 3 by default
                )
                self.example_inputs.append(example_input)
            with gr.Row():
                add_example_btn = gr.Button("➕ Add Example", size="sm")
                remove_example_btn = gr.Button("➖ Remove Example", size="sm", visible=False)
            self.example_count = gr.State(3)
        # URL Grounding
        with gr.Group():
            gr.Markdown("### 🔗 URL Grounding")
            gr.Markdown("Add URLs to provide context to your assistant")
            # All 10 URL fields exist up front; visibility is toggled by
            # the add/remove buttons. First two are "primary" sources with
            # a larger fetch budget (see preview respond()).
            self.url_count = gr.State(2)
            self.url_inputs = []
            for i in range(10):
                url_input = gr.Textbox(
                    label=f"URL {i+1}" + (" (Primary - 8000 chars)" if i < 2 else " (Secondary - 2500 chars)"),
                    placeholder="https://...",
                    visible=(i < 2)
                )
                self.url_inputs.append(url_input)
            with gr.Row():
                add_url_btn = gr.Button("➕ Add URL", size="sm")
                remove_url_btn = gr.Button("➖ Remove URL", size="sm", visible=False)
        # Environment Variables
        with gr.Accordion("🔑 Environment Variables", open=False):
            gr.Markdown("Configure the required secrets in your HuggingFace Space settings.")
            # Required API Key on its own row (read-only; shown for copying)
            self.api_key_var_input = gr.Textbox(
                label="API Key Variable Name (Required)",
                value="API_KEY",
                info="Environment variable for OpenRouter API key",
                interactive=False,
                show_copy_button=True
            )
            # Optional variables on the same row
            gr.Markdown("**Optional Environment Variables:**")
            with gr.Row():
                self.hf_token_input = gr.Textbox(
                    label="HF Token Variable Name",
                    value="HF_TOKEN",
                    info="Environment variable for HuggingFace token",
                    interactive=False,
                    show_copy_button=True
                )
                self.access_code_input = gr.Textbox(
                    label="Access Code Variable",
                    value="ACCESS_CODE",
                    info="Environment variable for password protection",
                    interactive=False,
                    show_copy_button=True
                )
        # Instructions
        with gr.Accordion("📖 Step-by-Step Instructions", open=False):
            gr.Markdown(
                """### How to Configure Secrets
1. Go to your Space → Click **⚙️ Settings** tab
2. Find **Variables and secrets** → Click **New secret**
3. Add your secrets:
![Adding API Key Secret](img/add-secret-api.png)
**Required:**
- `API_KEY` - Your OpenRouter key from https://openrouter.ai/keys
**Optional:**
- `HF_TOKEN` - For auto-updates (get from https://huggingface.co/settings/tokens)
- `ACCESS_CODE` - For password protection
"""
            )
        # Configuration Upload
        with gr.Accordion("📤 Upload Configuration", open=False):
            config_upload = gr.File(
                label="Upload config.json",
                file_types=[".json"],
                type="filepath"
            )
            upload_status = gr.Markdown(visible=False)
        # Action Buttons
        with gr.Row():
            preview_btn = gr.Button("💬 Preview Configuration", variant="secondary")
            generate_btn = gr.Button("🗳️ Generate Deployment Package", variant="primary")
        # Output Section (hidden until a package is generated)
        with gr.Column(visible=False) as output_section:
            output_message = gr.Markdown()
            download_file = gr.File(label="📦 Download Package", visible=False)
            deployment_details = gr.Markdown(visible=False)

        # Event Handlers
        template_selector.change(
            self._apply_template,
            inputs=[
                template_selector, self.previous_template, self.template_cache,
                self.name_input, self.tagline_input, self.description_input, self.system_prompt_input,
                self.model_input, self.language_input, self.temperature_input, self.max_tokens_input
            ] + self.example_inputs + self.url_inputs,
            outputs=[
                self.name_input, self.tagline_input, self.description_input, self.system_prompt_input,
                self.model_input, self.language_input, self.temperature_input, self.max_tokens_input
            ] + self.example_inputs + self.url_inputs + [self.previous_template, self.template_cache]
        )
        config_upload.upload(
            self._apply_uploaded_config,
            inputs=[config_upload],
            outputs=[
                self.name_input, self.tagline_input, self.description_input, self.system_prompt_input,
                self.model_input, self.language_input, self.theme_input, self.api_key_var_input,
                self.temperature_input, self.max_tokens_input, self.access_code_input, upload_status
            ] + self.example_inputs + self.url_inputs
        )

        # URL management: reveal/hide one more field per click.
        # Each handler returns 10 visibility updates + remove-button
        # visibility + the new count (order matches the outputs list).
        def update_url_visibility(count):
            new_count = min(count + 1, 10)
            updates = []
            for i in range(10):
                updates.append(gr.update(visible=(i < new_count)))
            updates.append(gr.update(visible=(new_count > 2)))  # Remove button
            updates.append(new_count)
            return updates

        def remove_url(count):
            new_count = max(count - 1, 2)  # Never hide the 2 primary URLs
            updates = []
            for i in range(10):
                updates.append(gr.update(visible=(i < new_count)))
            updates.append(gr.update(visible=(new_count > 2)))  # Remove button
            updates.append(new_count)
            return updates

        add_url_btn.click(
            update_url_visibility,
            inputs=[self.url_count],
            outputs=self.url_inputs + [remove_url_btn, self.url_count]
        )
        remove_url_btn.click(
            remove_url,
            inputs=[self.url_count],
            outputs=self.url_inputs + [remove_url_btn, self.url_count]
        )

        # Example management (same visibility pattern; minimum of 3 examples)
        def update_example_visibility(count):
            new_count = min(count + 1, 5)
            updates = []
            for i in range(5):
                updates.append(gr.update(visible=(i < new_count)))
            updates.append(gr.update(visible=(new_count > 3)))  # Remove button
            updates.append(new_count)
            return updates

        def remove_example(count):
            new_count = max(count - 1, 3)
            updates = []
            for i in range(5):
                updates.append(gr.update(visible=(i < new_count)))
            updates.append(gr.update(visible=(new_count > 3)))  # Remove button
            updates.append(new_count)
            return updates

        add_example_btn.click(
            update_example_visibility,
            inputs=[self.example_count],
            outputs=self.example_inputs + [remove_example_btn, self.example_count]
        )
        remove_example_btn.click(
            remove_example,
            inputs=[self.example_count],
            outputs=self.example_inputs + [remove_example_btn, self.example_count]
        )

        # Preview and Generate handlers
        preview_btn.click(
            self._preview_configuration,
            inputs=[
                self.name_input, self.tagline_input, self.description_input, self.system_prompt_input,
                self.model_input, self.language_input, self.theme_input, self.api_key_var_input,
                self.temperature_input, self.max_tokens_input, self.access_code_input
            ] + self.example_inputs + self.url_inputs,
            outputs=[self.config_state]
        ).then(
            # NOTE(review): selecting a tab by integer assumes default tab
            # ids; confirm index 1 targets the Preview tab in gradio 5.42.
            lambda: gr.Tabs(selected=1),  # Switch to preview tab
            outputs=[self.main_tabs]
        )
        generate_btn.click(
            self._generate_package,
            inputs=[
                self.name_input, self.tagline_input, self.description_input, self.system_prompt_input,
                self.model_input, self.language_input, self.theme_input, self.api_key_var_input,
                self.temperature_input, self.max_tokens_input, self.access_code_input
            ] + self.example_inputs + self.url_inputs,
            outputs=[
                output_section, output_message, download_file,
                deployment_details, self.config_state
            ]
        )
def _create_preview_tab(self):
    """Build the Preview tab.

    Uses ``@gr.render`` so the whole preview UI is rebuilt whenever
    ``self.config_state`` changes (i.e. after "Preview Configuration"
    is clicked). The nested ``respond`` closure performs a real
    OpenRouter API call using the in-form configuration.
    """
    with gr.Column():
        # Use gr.render for dynamic preview based on config
        @gr.render(inputs=[self.config_state])
        def render_preview(config):
            # Placeholder until _preview_configuration has set preview_ready
            if not config or not config.get('preview_ready'):
                gr.Markdown(
                    "### ⚠️ Preview Not Ready\n\n"
                    "Configure your assistant in the Configuration tab and click 'Preview Configuration' to test it here."
                )
                return
            # Header
            gr.Markdown(f"# {config.get('name', 'AI Assistant')}")
            if config.get('tagline'):
                gr.Markdown(f"*{config.get('tagline')}*")
            # Chat interface (openai-style "messages" history format)
            preview_chatbot = gr.Chatbot(
                type="messages",
                height=400,
                show_copy_button=True
            )
            # Message input
            msg = gr.Textbox(
                label="Message",
                placeholder="Type your message here...",
                lines=2
            )
            # Buttons
            with gr.Row():
                submit_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear")
            # Export functionality
            with gr.Row():
                export_btn = gr.DownloadButton(
                    "📥 Export Conversation",
                    variant="secondary",
                    size="sm"
                )
            # Examples section
            examples = config.get('examples_list', [])
            if examples:
                gr.Examples(examples=examples, inputs=msg)
            # File upload accordion (if enabled)
            if config.get('enable_file_upload'):
                with gr.Accordion("📎 Upload Files", open=False):
                    # NOTE(review): these two components are created but not
                    # wired to any handler in the preview — uploads are inert.
                    file_upload = gr.File(
                        label="Upload Files",
                        file_types=None,
                        file_count="multiple",
                        visible=True,
                        interactive=True
                    )
                    uploaded_files_display = gr.Markdown("", visible=False)
            # Configuration accordion (show everything except the ready flag)
            with gr.Accordion("ℹ️ Configuration", open=False):
                config_display = {k: v for k, v in config.items() if k != 'preview_ready'}
                gr.JSON(
                    value=config_display,
                    label="Active Configuration",
                    show_label=True
                )

            # Chat functionality
            def respond(message, chat_history):
                """Handle one user turn; returns (new_history, cleared_textbox)."""
                if not message:
                    return chat_history, ""
                # Without an API key, reply with setup instructions instead
                api_key = os.environ.get(config.get('api_key_var', 'API_KEY'))
                if not api_key:
                    response = (
                        f"🔑 **API Key Required**\n\n"
                        f"Please configure your OpenRouter API key:\n"
                        f"1. Go to Settings (⚙️) in your HuggingFace Space\n"
                        f"2. Click 'Variables and secrets'\n"
                        f"3. Add secret: **{config.get('api_key_var', 'API_KEY')}**\n"
                        f"4. Value: Your OpenRouter API key (starts with `sk-or-`)\n\n"
                        f"Get your API key at: https://openrouter.ai/keys"
                    )
                else:
                    # Make actual API call to OpenRouter
                    try:
                        headers = {
                            "Authorization": f"Bearer {api_key}",
                            "Content-Type": "application/json"
                        }
                        # Get grounding context from URLs if configured
                        grounding_context = ""
                        urls = config.get('grounding_urls', [])
                        if urls and len(urls) > 0:
                            grounding_context = ""
                            # Process primary sources (first 2 URLs, larger budget)
                            primary_urls = urls[:2]
                            if primary_urls:
                                grounding_context += "\n📚 **PRIMARY SOURCES:**\n"
                                for i, url in enumerate(primary_urls, 1):
                                    try:
                                        content = fetch_url_content(url, max_chars=8000)
                                        # fetch_url_content signals errors via ❌/⏱️ prefixes
                                        if not content.startswith("❌") and not content.startswith("⏱️"):
                                            grounding_context += f"\n**Primary Source {i} - {url}:**\n{content}\n"
                                    except:  # NOTE(review): bare except swallows all fetch errors
                                        pass
                            # Process secondary sources (URLs 3+)
                            secondary_urls = urls[2:]
                            if secondary_urls:
                                grounding_context += "\n\n📎 **SECONDARY SOURCES:**\n"
                                for i, url in enumerate(secondary_urls, 1):
                                    try:
                                        content = fetch_url_content(url, max_chars=2500)
                                        if not content.startswith("❌") and not content.startswith("⏱️"):
                                            grounding_context += f"\n**Secondary Source {i} - {url}:**\n{content}\n"
                                    except:  # NOTE(review): bare except swallows all fetch errors
                                        pass
                        # Check for dynamic URLs in message if enabled
                        if config.get('enable_dynamic_urls', True):
                            urls_in_message = extract_urls_from_text(message)
                            if urls_in_message:
                                dynamic_context = "\n📎 **Dynamic Context:**\n"
                                for url in urls_in_message[:3]:  # Limit to 3 URLs
                                    try:
                                        content = fetch_url_content(url, max_chars=2500)
                                        if not content.startswith("❌"):
                                            dynamic_context += f"\n{content}"
                                    except:  # NOTE(review): bare except swallows all fetch errors
                                        pass
                                grounding_context += dynamic_context
                        # Build messages for API with grounding context in system prompt
                        system_content = config.get('system_prompt', 'You are a helpful AI assistant.')
                        language = config.get('language', 'English')
                        # Add language instruction if not English
                        if language != 'English':
                            system_content += f"\n\nIMPORTANT: You must respond EXCLUSIVELY in {language}. All your responses should be written entirely in {language}, even when user input is in a different language, particularly English."
                        if grounding_context:
                            system_content += "\n\nIMPORTANT: When providing information from the reference sources below, please cite the specific URL(s) where the information can be found."
                            system_content = f"{system_content}\n\n{grounding_context}"
                        messages = [{"role": "system", "content": system_content}]
                        # Add conversation history (already in messages format)
                        # NOTE: loop variable `msg` shadows the outer Textbox,
                        # but only within this function's local scope.
                        for msg in chat_history:
                            if isinstance(msg, dict) and 'role' in msg and 'content' in msg:
                                messages.append({
                                    "role": msg['role'],
                                    "content": msg['content']
                                })
                        # Add current message
                        messages.append({
                            "role": "user",
                            "content": message
                        })
                        data = {
                            "model": config.get('model', 'openai/gpt-3.5-turbo'),
                            "messages": messages,
                            "temperature": config.get('temperature', 0.7),
                            "max_tokens": config.get('max_tokens', 750),
                            "stream": False
                        }
                        # Add additional headers to match space template
                        space_id = os.environ.get('SPACE_ID', '')
                        headers.update({
                            "HTTP-Referer": f"https://huggingface.co/spaces/{space_id}" if space_id else "https://huggingface.co",
                            "X-Title": config.get('name', 'AI Assistant')
                        })
                        # NOTE: `response` holds the HTTP Response here and is
                        # then rebound to the reply string below.
                        response = requests.post(
                            "https://openrouter.ai/api/v1/chat/completions",
                            headers=headers,
                            json=data,
                            timeout=30
                        )
                        if response.status_code == 200:
                            result = response.json()
                            response = result['choices'][0]['message']['content']
                        else:
                            error_data = response.json()
                            error_message = error_data.get('error', {}).get('message', 'Unknown error')
                            response = f"❌ API Error ({response.status_code}): {error_message}"
                    except requests.exceptions.Timeout:
                        response = "⏰ Request timeout (30s limit). Try a shorter message or different model."
                    except requests.exceptions.ConnectionError:
                        response = "🌐 Connection error. Check your internet connection and try again."
                    except Exception as e:
                        response = f"❌ Error: {str(e)}"
                # Update chat history (new list, not in-place mutation)
                chat_history = chat_history + [
                    {"role": "user", "content": message},
                    {"role": "assistant", "content": response}
                ]
                return chat_history, ""

            # Wire up the interface
            msg.submit(respond, [msg, preview_chatbot], [preview_chatbot, msg])
            submit_btn.click(respond, [msg, preview_chatbot], [preview_chatbot, msg])
            clear_btn.click(lambda: ([], ""), outputs=[preview_chatbot, msg])

            # Export handler: write the conversation to a temp markdown file
            # and point the DownloadButton at it.
            def prepare_export(chat_history):
                if not chat_history:
                    return None
                # Export conversation
                content = export_conversation_to_markdown(chat_history)
                # Create filename from the space name + timestamp
                space_name_safe = re.sub(r'[^a-zA-Z0-9]+', '_', config.get('name', 'AI_Assistant')).lower()
                timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                filename = f"{space_name_safe}_conversation_{timestamp}.md"
                # Save to temp file
                temp_path = Path(tempfile.gettempdir()) / filename
                temp_path.write_text(content, encoding='utf-8')
                return str(temp_path)

            # NOTE(review): the DownloadButton's file is set by its own click
            # handler — confirm in gradio 5.42 that the download uses the
            # value returned by this handler rather than the pre-click value.
            export_btn.click(
                prepare_export,
                inputs=[preview_chatbot],
                outputs=[export_btn]
            )
def _create_documentation_tab(self):
    """Build the Documentation tab: static markdown linking to external docs."""
    with gr.Column():
        gr.Markdown("""
# 📚 Documentation
For detailed instructions on using ChatUI Helper, please refer to our comprehensive documentation.
### 📖 [View Documentation →](https://huggingface.co/spaces/milwright/chatui-helper/blob/main/docs.md)
The documentation includes:
- 📖 Quick Start Guide
- 📝 Step-by-step configuration instructions
- 🗳️ Deployment guide for HuggingFace Spaces
- 🔧 Troubleshooting common issues
- 📚 Additional resources and links
---
**Tip:** You can open the documentation in a new tab to reference it while configuring your Space.
""")
def _apply_template(self, template_name, prev_template, cache,
                    name, tagline, desc, prompt, model, language, temp, tokens, *args):
    """Apply selected template to form fields with caching.

    Returns exactly 25 values in the order of the change() outputs list:
    8 basic-field updates, 5 example updates, 10 URL updates, then the
    new previous_template value and the (possibly mutated) cache dict.
    Switching away from "None (Custom)" snapshots the user's custom
    values into ``cache["custom"]`` so they can be restored later.
    """
    # Split args into examples and URLs
    example_values = args[:5]  # First 5 are examples
    url_values = args[5:]  # Rest are URLs
    # First, cache the current values if switching from custom
    if prev_template == "None (Custom)" and template_name != "None (Custom)":
        # Cache custom values - collect non-empty examples and URLs
        examples_list = [ex for ex in example_values if ex and ex.strip()]
        urls_list = [url for url in url_values if url and url.strip()]
        cache["custom"] = {
            'name': name,
            'tagline': tagline,
            'description': desc,
            'system_prompt': prompt,
            'model': model,
            'language': language,
            'temperature': temp,
            'max_tokens': tokens,
            'examples': examples_list,
            'grounding_urls': urls_list
        }
    # Apply new template values
    if template_name == "None (Custom)":
        # Restore custom values if they exist
        if "custom" in cache:
            custom = cache["custom"]
            cached_examples = custom.get('examples', [])
            cached_urls = custom.get('grounding_urls', [])
            # Prepare example updates - fill first 5 fields, blank the rest
            example_updates = []
            for i in range(5):
                if i < len(cached_examples):
                    example_updates.append(gr.update(value=cached_examples[i]))
                else:
                    example_updates.append(gr.update(value=""))
            # Prepare URL updates - fill first 10 fields, blank the rest
            url_updates = []
            for i in range(10):
                if i < len(cached_urls):
                    url_updates.append(gr.update(value=cached_urls[i]))
                else:
                    url_updates.append(gr.update(value=""))
            return [
                gr.update(value=custom.get('name', '')),
                gr.update(value=custom.get('tagline', '')),
                gr.update(value=custom.get('description', '')),
                gr.update(value=custom.get('system_prompt', '')),
                gr.update(value=custom.get('model', 'google/gemini-2.0-flash-001')),
                gr.update(value=custom.get('language', 'English')),
                gr.update(value=custom.get('temperature', 0.7)),
                gr.update(value=custom.get('max_tokens', 750))
            ] + example_updates + url_updates + [template_name, cache]
        else:
            # No cached values, return defaults
            default_examples = [
                "Hello! How can you help me?",
                "Tell me something interesting",
                "What can you do?",
                "",
                ""
            ]
            example_updates = [gr.update(value=ex) for ex in default_examples]
            url_updates = [gr.update(value="") for _ in range(10)]
            return [
                gr.update(value='AI Assistant'),
                gr.update(value='A customizable AI assistant'),
                gr.update(value='A versatile AI assistant powered by advanced language models.'),
                gr.update(value='You are a helpful AI assistant.'),
                gr.update(value='google/gemini-2.0-flash-001'),
                gr.update(value='English'),
                gr.update(value=0.7),
                gr.update(value=750)
            ] + example_updates + url_updates + [template_name, cache]
    elif template_name in ACADEMIC_TEMPLATES:
        template = ACADEMIC_TEMPLATES[template_name]
        template_examples = template.get('examples', [])
        template_urls = template.get('grounding_urls', [])
        # Prepare example updates - fill available examples, empty the rest
        example_updates = []
        for i in range(5):
            if i < len(template_examples):
                example_updates.append(gr.update(value=template_examples[i]))
            else:
                example_updates.append(gr.update(value=""))
        # Prepare URL updates - fill available URLs, empty the rest
        url_updates = []
        for i in range(10):
            if i < len(template_urls):
                url_updates.append(gr.update(value=template_urls[i]))
            else:
                url_updates.append(gr.update(value=""))
        return [
            gr.update(value=template.get('name', '')),
            # Tagline falls back to the first 60 chars of the description
            gr.update(value=template.get('tagline', template.get('description', '')[:60])),
            gr.update(value=template.get('description', '')),
            gr.update(value=template.get('system_prompt', '')),
            gr.update(value=template.get('model', 'google/gemini-2.0-flash-001')),
            gr.update(value=template.get('language', 'English')),
            gr.update(value=template.get('temperature', 0.7)),
            gr.update(value=template.get('max_tokens', 750))
        ] + example_updates + url_updates + [template_name, cache]
    else:
        # Invalid template, no updates
        # 8 basic fields + 5 examples + 10 URLs = 23, plus prev_template and cache = 25 total
        return [gr.update() for _ in range(23)] + [prev_template, cache]
def _apply_uploaded_config(self, config_file):
    """Apply an uploaded config.json to the form fields.

    Args:
        config_file: Filepath of the uploaded JSON file (or None).

    Returns:
        27 ``gr.update`` objects, in the exact order of the
        ``config_upload.upload()`` outputs list: 11 basic fields,
        1 status message, 5 example fields, 10 URL fields.
    """
    if not config_file:
        # Nothing uploaded: leave every component untouched
        # 11 basic + 1 status + 5 examples + 10 URLs = 27 total
        return [gr.update() for _ in range(27)]
    try:
        # Explicit encoding so configs with non-ASCII text load portably
        with open(config_file, 'r', encoding='utf-8') as f:
            config = json.load(f)
        # Guard against valid JSON that is not an object (e.g. a list),
        # which would otherwise surface as a cryptic AttributeError
        if not isinstance(config, dict):
            raise ValueError("config.json must contain a JSON object")
        # Extract values (missing keys fall back to the app defaults)
        updates = [
            gr.update(value=config.get('name', '')),
            # Tagline falls back to the first 60 chars of the description
            gr.update(value=config.get('tagline', config.get('description', '')[:60])),
            gr.update(value=config.get('description', '')),
            gr.update(value=config.get('system_prompt', '')),
            gr.update(value=config.get('model', 'google/gemini-2.0-flash-001')),
            gr.update(value=config.get('language', 'English')),
            gr.update(value=config.get('theme', 'Default')),
            gr.update(value=config.get('api_key_var', 'API_KEY')),
            gr.update(value=config.get('temperature', 0.7)),
            gr.update(value=config.get('max_tokens', 750)),
            gr.update(value=config.get('access_code', ''))
        ]
        # Status message
        updates.append(gr.update(
            value="Configuration loaded successfully",
            visible=True
        ))
        # Example updates: fill available values, blank the remainder
        examples = config.get('examples', [])
        updates.extend(
            gr.update(value=examples[i] if i < len(examples) else "")
            for i in range(5)
        )
        # URL updates: fill available values, blank the remainder
        urls = config.get('grounding_urls', [])
        updates.extend(
            gr.update(value=urls[i] if i < len(urls) else "")
            for i in range(10)
        )
        return updates
    except Exception as e:
        # Leave all fields untouched and surface the error in the status line
        error_updates = [gr.update() for _ in range(11)]  # Basic fields
        error_updates.append(gr.update(
            value=f"Error loading configuration: {str(e)}",
            visible=True
        ))
        error_updates.extend([gr.update() for _ in range(5)])  # Examples
        error_updates.extend([gr.update() for _ in range(10)])  # URLs
        return error_updates
def _preview_configuration(self, name, tagline, description, system_prompt, model,
                           language, theme, api_key_var, temperature, max_tokens,
                           access_code, *args):
    """Assemble the preview configuration dict and mark it ready.

    The trailing *args are the 5 example textboxes followed by the 10
    URL textboxes; blank entries are filtered out. The returned dict is
    stored in config_state, which triggers the Preview tab's gr.render.
    """
    example_values, url_values = args[:5], args[5:]
    kept_examples = [
        text.strip()
        for text in example_values
        if text and hasattr(text, 'strip') and text.strip()
    ]
    kept_urls = [candidate for candidate in url_values if candidate and candidate.strip()]
    # Empty identity fields fall back to friendly defaults
    config = {
        'name': name or 'AI Assistant',
        'tagline': tagline or 'A customizable AI assistant',
        'description': description or 'A versatile AI assistant powered by advanced language models.',
        'system_prompt': system_prompt or 'You are a helpful AI assistant.',
        'model': model,
        'language': language,
        'theme': theme,
        'api_key_var': api_key_var,
        'temperature': temperature,
        'max_tokens': int(max_tokens),
        'access_code': access_code,
        'grounding_urls': kept_urls,
        'examples_list': kept_examples,
        'preview_ready': True
    }
    gr.Info("✅ Preview updated! ⬆️ Switch to the Preview tab to test your assistant.")
    return config
def _generate_package(self, name, tagline, description, system_prompt, model,
                      language, theme, api_key_var, temperature, max_tokens,
                      access_code, *args):
    """Generate the deployment package zip for a HuggingFace Space.

    The trailing *args are the 5 example textboxes followed by the 10
    URL textboxes.

    Returns:
        5-tuple of updates for (output_section, output_message,
        download_file, deployment_details) plus the plain config dict
        stored in config_state.
    """
    try:
        # Validate inputs. FIX: the original constructed gr.Error(...)
        # without raising it, which displays nothing; return a visible
        # error message instead (raising here would be swallowed by the
        # enclosing except anyway).
        if not system_prompt:
            return (
                gr.update(visible=True),
                gr.update(value="Error: Please provide a system prompt", visible=True),
                gr.update(visible=False),
                gr.update(visible=False),
                {}
            )
        # Split args into examples and URLs
        example_values = args[:5]  # First 5 are examples
        urls = args[5:]  # Rest are URLs
        # Process examples
        examples_list = [ex.strip() for ex in example_values if ex and hasattr(ex, 'strip') and ex.strip()]
        examples_python = repr(examples_list)
        # Process URLs
        grounding_urls = [url.strip() for url in urls if url and hasattr(url, 'strip') and url.strip()]
        # Create configuration. String values are repr()'d because they are
        # substituted into the generated app.py as Python source literals.
        config = {
            'name': repr(name or 'AI Assistant'),
            'description': repr(tagline or 'A customizable AI assistant'),
            'system_prompt': repr(system_prompt),
            'model': repr(model),
            'api_key_var': repr(api_key_var),
            'temperature': temperature,
            'max_tokens': int(max_tokens),
            'examples': examples_python,
            'grounding_urls': json.dumps(grounding_urls),
            'enable_dynamic_urls': True,
            'enable_file_upload': True,
            'theme': repr(theme),
            'language': repr(language)
        }
        # Generate files
        template = get_template()
        app_content = template.format(**config)
        requirements_content = """gradio>=5.42.0
requests>=2.32.3
beautifulsoup4>=4.12.3
python-dotenv>=1.0.0
huggingface-hub>=0.20.0"""
        # Plain (non-repr) configuration for config.json / config_state
        config_json = {
            'name': name or 'AI Assistant',
            'tagline': tagline or 'A customizable AI assistant',
            'description': description or 'A versatile AI assistant powered by advanced language models.',
            'system_prompt': system_prompt,
            'model': model,
            'language': language,
            'api_key_var': api_key_var,
            'temperature': temperature,
            'max_tokens': int(max_tokens),
            'examples': examples_list,
            'grounding_urls': grounding_urls,
            'enable_dynamic_urls': True,
            'enable_file_upload': True,
            'theme': theme
        }
        # Create README
        readme_content = self._create_readme(
            name or 'AI Assistant',
            tagline or 'A customizable AI assistant',
            description or 'A versatile AI assistant powered by advanced language models. Configure it to meet your specific needs with custom prompts, examples, and grounding URLs.',
            model,
            api_key_var,
            access_code
        )
        # Create zip file in memory, then persist it for the download component
        filename = create_safe_filename(name or 'ai_assistant', suffix='.zip')
        zip_buffer = io.BytesIO()
        with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            zip_file.writestr('app.py', app_content)
            zip_file.writestr('requirements.txt', requirements_content)
            zip_file.writestr('config.json', json.dumps(config_json, indent=2))
            zip_file.writestr('README.md', readme_content)
        # Save zip file
        zip_buffer.seek(0)
        with open(filename, 'wb') as f:
            f.write(zip_buffer.getvalue())
        # Success message. FIX: show the actual generated filename instead
        # of a garbled placeholder.
        title_msg = f"**🎉 Deployment package ready!**\n\n**File**: `{filename}`"
        details_msg = f"""**Package Contents:**
- `app.py` - Ready-to-deploy Gradio application
- `requirements.txt` - Python dependencies
- `config.json` - Configuration backup
- `README.md` - Deployment instructions
**Next Steps:**
1. Download the package below
2. Create a new HuggingFace Space
3. Upload all files from the package
4. In HF Space settings:
-- set your `{api_key_var}` secret (required)
-- set `HF_TOKEN` for persistent customization (free, recommended)
-- set `ACCESS_CODE` secret for access control (custom, optional)
5. Watch as the space builds and deploys automatically
"""
        return (
            gr.update(visible=True),
            gr.update(value=title_msg, visible=True),
            gr.update(value=filename, visible=True),
            gr.update(value=details_msg, visible=True),
            config_json
        )
    except Exception as e:
        # Show the failure in the (now visible) output section
        return (
            gr.update(visible=True),
            gr.update(value=f"Error: {str(e)}", visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
            {}
        )
def _format_config_display(self, config):
"""Format configuration for display"""
return f"""**Model:** {config.get('model', 'Not set')}
**Temperature:** {config.get('temperature', 0.7)}
**Max Tokens:** {config.get('max_tokens', 750)}
**Theme:** {config.get('theme', 'Default')}
**System Prompt:**
{config.get('system_prompt', 'Not set')}"""
def _create_readme(self, title, tagline, description, model, api_key_var, access_code):
"""Create README.md content"""
emoji = "πŸ’¬"
access_section = ""
if access_code:
access_section = f"""
### Step 3: Set Access Code
1. In Settings β†’ Variables and secrets
2. Add secret: `ACCESS_CODE`
3. Set your chosen password
4. Share with authorized users
"""
return f"""---
title: {title}
emoji: {emoji}
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 5.42.0
app_file: app.py
pinned: false
license: mit
short_description: {tagline}
---
# {title}
{description}
## Quick Setup
### Step 1: Configure API Key (Required)
1. Get your API key from https://openrouter.ai/keys
2. In Settings β†’ Variables and secrets
3. Add secret: `{api_key_var}`
4. Paste your OpenRouter API key
### Step 2: Configure HuggingFace Token (Optional)
1. Get your token from https://huggingface.co/settings/tokens
2. In Settings β†’ Variables and secrets
3. Add secret: `HF_TOKEN`
4. Paste your HuggingFace token (needs write permissions)
5. This enables automatic configuration updates
{access_section}
### Step 3: Test Your Space
Your Space should now be running! Try the example prompts or ask your own questions.
## Configuration
- **Model**: {model}
- **API Key Variable**: {api_key_var}
- **HF Token Variable**: HF_TOKEN (for auto-updates)
{f"- **Access Control**: Enabled (ACCESS_CODE)" if access_code else "- **Access**: Public"}
## Support
For help, visit the HuggingFace documentation or community forums."""
def main():
    """Entry point: build the generator UI and launch the Gradio server."""
    app = SpaceGenerator().create_interface()
    app.launch(share=True)


if __name__ == "__main__":
    main()