#!/usr/bin/env python3
"""
AI-Powered Personal Learning Assistant
Version 2.0 - Clean Production Build for HuggingFace Spaces
Gradio Agents & MCP Hackathon 2025

Features:
- Multi-agent AI reasoning with smolagents
- Voice AI processing with SambaNova Cloud
- Real-time data integration via MCP
- ZeroGPU optimized for HuggingFace Spaces
"""
import os
import gc
import sys
import logging
import sqlite3
import tempfile
import traceback
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Any
from datetime import datetime, timedelta

# Core dependencies
import gradio as gr
import requests
import plotly.graph_objects as go
import plotly.express as px
from dotenv import load_dotenv
# Audio processing
try:
    import speech_recognition as sr
    import pydub
    import soundfile as sf
    AUDIO_AVAILABLE = True
except ImportError as e:
    AUDIO_AVAILABLE = False
    print(f"⚠️ Audio libraries not available: {e}")

# AI and ML dependencies with graceful fallbacks
try:
    import transformers
    TRANSFORMERS_AVAILABLE = True
except ImportError:
    TRANSFORMERS_AVAILABLE = False
    print("⚠️ Transformers not available")

try:
    import smolagents
    from smolagents import CodeAgent, ReactCodeAgent, tool, HfApiModel
    SMOLAGENTS_AVAILABLE = True
except ImportError:
    SMOLAGENTS_AVAILABLE = False
    print("⚠️ Smolagents not available - using fallback mode")

# Define a tool decorator fallback BEFORE using it; when smolagents is
# available, the real `tool` is already imported above.
if not SMOLAGENTS_AVAILABLE:
    def tool(func):
        """Fallback tool decorator when smolagents is not available"""
        func._is_tool = True
        return func
# HuggingFace Spaces support
try:
    import spaces
    SPACES_AVAILABLE = True
except ImportError:
    SPACES_AVAILABLE = False

    # Mock spaces decorator for local development
    class spaces:
        def GPU(func):
            return func

# Environment setup
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global configuration
SAMBANOVA_API_KEY = os.getenv("SAMBANOVA_API_KEY")
HF_TOKEN = os.getenv("HF_TOKEN")
SAMBANOVA_AVAILABLE = bool(SAMBANOVA_API_KEY)
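
# Example .env file for local development (illustrative; the variable names match
# the os.getenv() calls above, the values are placeholders):
#
#   SAMBANOVA_API_KEY=your-sambanova-api-key
#   HF_TOKEN=your-huggingface-token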
# ============================================================================
# Database Layer - Learning Progress Tracking
# ============================================================================

class LearningDatabase:
    """SQLite database for tracking learning progress and user profiles"""

    def __init__(self, db_path: str = "learning_assistant.db"):
        self.db_path = db_path
        self.init_database()

    def init_database(self):
        """Initialize database tables"""
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.cursor()

                # User profiles table
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS user_profiles (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        name TEXT NOT NULL,
                        learning_style TEXT,
                        goals TEXT,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                    )
                """)

                # Learning sessions table
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS learning_sessions (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        user_id INTEGER,
                        subject TEXT NOT NULL,
                        level TEXT,
                        session_data TEXT,
                        progress_score REAL,
                        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        FOREIGN KEY (user_id) REFERENCES user_profiles (id)
                    )
                """)

                # Progress tracking table
                cursor.execute("""
                    CREATE TABLE IF NOT EXISTS progress_tracking (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        user_id INTEGER,
                        subject TEXT,
                        skill TEXT,
                        mastery_level REAL,
                        last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
                        FOREIGN KEY (user_id) REFERENCES user_profiles (id)
                    )
                """)

                conn.commit()
                logger.info("✅ Database initialized successfully")
        except Exception as e:
            logger.error(f"❌ Database initialization failed: {e}")
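
# Illustrative sketch (not called anywhere in the app): how a learning session
# could be recorded against the schema created above. The helper name and its
# arguments are hypothetical; only the table and column names come from
# LearningDatabase.init_database().
def _example_record_session(db: LearningDatabase, user_id: int, subject: str,
                            level: str, score: float) -> None:
    """Insert one learning_sessions row for a user (illustration only)."""
    with sqlite3.connect(db.db_path) as conn:
        conn.execute(
            "INSERT INTO learning_sessions (user_id, subject, level, progress_score) "
            "VALUES (?, ?, ?, ?)",
            (user_id, subject, level, score),
        )
        conn.commit()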
# ============================================================================
# Multi-Agent AI System with Smolagents
# ============================================================================

class LearningAgents:
    """Multi-agent system using smolagents for advanced reasoning"""

    def __init__(self):
        self.agents_available = SMOLAGENTS_AVAILABLE
        self.setup_agents()

    def setup_agents(self):
        """Initialize smolagents-based multi-agent system"""
        if not self.agents_available:
            logger.warning("⚠️ Smolagents not available, using fallback agents")
            self.setup_fallback_agents()
            return
        try:
            # Initialize smolagents with proper configuration
            from smolagents import HfApiModel

            # Use HuggingFace models for reasoning
            model = HfApiModel("microsoft/DialoGPT-medium")

            # Create specialized agents
            self.curriculum_agent = self.create_curriculum_agent(model)
            self.content_agent = self.create_content_agent(model)
            self.assessment_agent = self.create_assessment_agent(model)
            logger.info("✅ Smolagents multi-agent system initialized")
        except Exception as e:
            logger.error(f"❌ Smolagents setup failed: {e}")
            self.agents_available = False
            self.setup_fallback_agents()

    def create_curriculum_agent(self, model):
        """Create curriculum planning agent with smolagents"""
        if not self.agents_available:
            return self.create_fallback_agent("curriculum")
        try:
            agent = CodeAgent(
                tools=[self.curriculum_planning_tool],
                model=model,
                max_iterations=3
            )
            return agent
        except Exception as e:
            logger.error(f"Curriculum agent creation failed: {e}")
            return self.create_fallback_agent("curriculum")

    def create_content_agent(self, model):
        """Create content generation agent with smolagents"""
        if not self.agents_available:
            return self.create_fallback_agent("content")
        try:
            agent = ReactCodeAgent(
                tools=[self.content_generation_tool],
                model=model,
                max_iterations=2
            )
            return agent
        except Exception as e:
            logger.error(f"Content agent creation failed: {e}")
            return self.create_fallback_agent("content")

    def create_assessment_agent(self, model):
        """Create assessment agent with smolagents"""
        if not self.agents_available:
            return self.create_fallback_agent("assessment")
        try:
            agent = CodeAgent(
                tools=[self.assessment_generation_tool],
                model=model,
                max_iterations=2
            )
            return agent
        except Exception as e:
            logger.error(f"Assessment agent creation failed: {e}")
            return self.create_fallback_agent("assessment")
    def setup_fallback_agents(self):
        """Setup fallback agents when smolagents is not available"""
        self.curriculum_agent = self.create_fallback_agent("curriculum")
        self.content_agent = self.create_fallback_agent("content")
        self.assessment_agent = self.create_fallback_agent("assessment")

    def create_fallback_agent(self, agent_type: str):
        """Create fallback agent for when smolagents is unavailable"""

        class FallbackAgent:
            def __init__(self, agent_type):
                self.agent_type = agent_type

            def run(self, prompt):
                return f"🤖 **Fallback {self.agent_type.title()} Agent**\n\n{self.generate_fallback_response(prompt)}"

            def generate_fallback_response(self, prompt):
                if self.agent_type == "curriculum":
                    return self.generate_curriculum_fallback(prompt)
                elif self.agent_type == "content":
                    return self.generate_content_fallback(prompt)
                elif self.agent_type == "assessment":
                    return self.generate_assessment_fallback(prompt)
                else:
                    return "This feature requires smolagents. Please install with: pip install smolagents"

            # Minimal per-agent responses used when smolagents cannot run
            def generate_curriculum_fallback(self, prompt):
                return f"Basic curriculum outline for your request:\n\n{prompt}\n\nInstall smolagents for full multi-step planning."

            def generate_content_fallback(self, prompt):
                return f"Basic learning content for your request:\n\n{prompt}\n\nInstall smolagents for richer generated content."

            def generate_assessment_fallback(self, prompt):
                return f"Basic self-check questions for your request:\n\n{prompt}\n\nInstall smolagents for full assessment generation."

        return FallbackAgent(agent_type)
    # Tool methods - decorated only when smolagents is available
    def curriculum_planning_tool(self, subject: str, level: str, goals: str) -> str:
        """Curriculum planning tool for smolagents"""
        return f"""
# 📚 AI-Generated Curriculum: {subject}

## 🎯 Learning Path for {level.title()} Level

### Phase 1: Foundation Building
- Core concepts and terminology
- Essential prerequisites review
- Hands-on introduction exercises

### Phase 2: Skill Development
- Practical application projects
- Guided practice sessions
- Real-world case studies

### Phase 3: Advanced Application
- Complex problem solving
- Integration with other topics
- Portfolio development

### 📈 Progress Milestones
1. **Week 1-2**: Foundation mastery
2. **Week 3-4**: Practical application
3. **Week 5-6**: Advanced projects

**Personalized for:** {goals}
"""

    def content_generation_tool(self, topic: str, difficulty: str) -> str:
        """Content generation tool for smolagents"""
        return f"""
# 📖 Learning Content: {topic}

## 📋 Overview
This {difficulty}-level content covers essential concepts in {topic}.

## 🎯 Key Learning Objectives
- Understand fundamental principles
- Apply concepts to real scenarios
- Develop practical skills

## 📚 Content Structure
1. **Introduction & Context**
2. **Core Concepts**
3. **Practical Examples**
4. **Hands-on Exercises**
5. **Assessment & Review**

## 🚀 Next Steps
Continue with advanced topics or apply skills in projects.
"""

    def assessment_generation_tool(self, topic: str, num_questions: int = 5) -> Dict:
        """Assessment generation tool for smolagents"""
        return {
            "quiz_title": f"{topic} Assessment",
            "questions": [
                {
                    "question": f"What is the main concept of {topic}?",
                    "options": ["Option A", "Option B", "Option C", "Option D"],
                    "correct": 0
                } for i in range(num_questions)
            ],
            "difficulty": "intermediate",
            "estimated_time": f"{num_questions * 2} minutes"
        }


# Apply @tool decorator if smolagents is available
if SMOLAGENTS_AVAILABLE:
    LearningAgents.curriculum_planning_tool = tool(LearningAgents.curriculum_planning_tool)
    LearningAgents.content_generation_tool = tool(LearningAgents.content_generation_tool)
    LearningAgents.assessment_generation_tool = tool(LearningAgents.assessment_generation_tool)
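
# Illustrative sketch (not wired into the UI): assessment_generation_tool above
# returns a plain dict, so a small helper like this could render it as Markdown
# for display. The helper name is hypothetical.
def _example_render_assessment(assessment: Dict) -> str:
    """Render the assessment dict produced above as a Markdown quiz (illustration only)."""
    lines = [f"# {assessment['quiz_title']}", ""]
    for i, q in enumerate(assessment["questions"], start=1):
        lines.append(f"{i}. {q['question']}")
        for j, option in enumerate(q["options"]):
            marker = "✅" if j == q["correct"] else "-"
            lines.append(f"   {marker} {option}")
    lines.append("")
    lines.append(f"*Estimated time: {assessment['estimated_time']}*")
    return "\n".join(lines)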
# ============================================================================
# SambaNova Audio AI Integration
# ============================================================================

class SambaNovaAudioAI:
    """SambaNova Cloud integration for Qwen2-Audio-7B-Instruct processing"""

    def __init__(self):
        self.api_key = SAMBANOVA_API_KEY
        self.available = bool(self.api_key) and AUDIO_AVAILABLE
        self.base_url = "https://api.sambanova.ai/v1"
        if not self.available:
            logger.warning("⚠️ SambaNova Audio AI not available")

    def process_audio_with_qwen(self, audio_path: str, prompt: str = None) -> Dict:
        """Process audio with Qwen2-Audio-7B-Instruct model"""
        if not self.available:
            return {
                "error": "SambaNova Audio AI not available",
                "message": "Please set SAMBANOVA_API_KEY environment variable"
            }
        try:
            # Convert audio to required format
            audio_data = self.prepare_audio(audio_path)

            # Prepare request for SambaNova API
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }
            payload = {
                "model": "Qwen2-Audio-7B-Instruct",
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt or "Analyze this audio and provide educational insights"},
                            {"type": "audio", "audio": audio_data}
                        ]
                    }
                ],
                "max_tokens": 1000,
                "temperature": 0.7
            }
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=payload,
                timeout=30
            )
            if response.status_code == 200:
                return response.json()
            else:
                return {
                    "error": f"API request failed with status {response.status_code}",
                    "details": response.text
                }
        except Exception as e:
            logger.error(f"SambaNova API error: {e}")
            return {
                "error": "Audio processing failed",
                "details": str(e)
            }

    def prepare_audio(self, audio_path: str) -> str:
        """Prepare audio for SambaNova API"""
        try:
            # Convert to WAV format if needed
            if not audio_path.endswith('.wav'):
                audio = pydub.AudioSegment.from_file(audio_path)
                wav_path = audio_path.replace(Path(audio_path).suffix, '.wav')
                audio.export(wav_path, format="wav")
                audio_path = wav_path

            # Read and encode audio
            import base64
            with open(audio_path, 'rb') as f:
                audio_data = base64.b64encode(f.read()).decode('utf-8')
            return audio_data
        except Exception as e:
            logger.error(f"Audio preparation failed: {e}")
            raise
    def generate_learning_plan_from_audio(self, audio_path: str) -> str:
        """Generate learning plan from audio input"""
        prompt = """
        Listen to this audio and create a comprehensive learning plan.
        Include:
        1. Identified learning goals from the audio
        2. Recommended curriculum structure
        3. Timeline and milestones
        4. Resources and next steps
        """
        result = self.process_audio_with_qwen(audio_path, prompt)
        if "error" in result:
            return f"❌ **Audio Processing Error**: {result['error']}"
        try:
            content = result['choices'][0]['message']['content']
            return f"""
# 🎤 Audio-Generated Learning Plan

{content}

---
*Generated by Qwen2-Audio-7B-Instruct via SambaNova Cloud*
"""
        except (KeyError, IndexError) as e:
            return f"❌ **Response Processing Error**: {e}"

    def answer_audio_question(self, audio_path: str) -> str:
        """Answer questions from audio input"""
        prompt = """
        Listen to this audio question and provide a comprehensive educational answer.
        Structure your response with:
        1. Clear explanation of the concept
        2. Practical examples
        3. Additional learning resources
        4. Related topics to explore
        """
        result = self.process_audio_with_qwen(audio_path, prompt)
        if "error" in result:
            return f"❌ **Audio Processing Error**: {result['error']}"
        try:
            content = result['choices'][0]['message']['content']
            return f"""
# 🎤 Audio Q&A Response

{content}

---
*Powered by Qwen2-Audio-7B-Instruct*
"""
        except (KeyError, IndexError) as e:
            return f"❌ **Response Processing Error**: {e}"

    def convert_speech_to_text(self, audio_path: str) -> str:
        """Convert speech to text using local speech recognition"""
        if not AUDIO_AVAILABLE:
            return "❌ Speech recognition not available"
        try:
            recognizer = sr.Recognizer()
            with sr.AudioFile(audio_path) as source:
                audio_data = recognizer.record(source)
                text = recognizer.recognize_google(audio_data)
            return f"**Transcribed Text**: {text}"
        except Exception as e:
            return f"❌ **Speech Recognition Error**: {e}"
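
# Illustrative sketch (assumption: the same /chat/completions endpoint accepts
# plain text-only messages): a quick way to confirm SAMBANOVA_API_KEY works
# before sending base64 audio payloads. Not called anywhere in the app.
def _example_sambanova_healthcheck() -> bool:
    """Return True if a minimal text request to SambaNova succeeds (illustration only)."""
    if not SAMBANOVA_API_KEY:
        return False
    try:
        response = requests.post(
            "https://api.sambanova.ai/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {SAMBANOVA_API_KEY}",
                "Content-Type": "application/json"
            },
            json={
                "model": "Qwen2-Audio-7B-Instruct",
                "messages": [{"role": "user", "content": "ping"}],
                "max_tokens": 5
            },
            timeout=15
        )
        return response.status_code == 200
    except requests.RequestException:
        return False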
# ============================================================================
# Main Learning Assistant Class
# ============================================================================

class LearningAssistant:
    """Main learning assistant orchestrating all components"""

    def __init__(self):
        self.db = LearningDatabase()
        self.agents = LearningAgents()
        self.audio_ai = SambaNovaAudioAI()
        logger.info("✅ Learning Assistant initialized")

    def generate_curriculum_with_multistep_reasoning(self, subject: str, level: str, goals: str) -> str:
        """Generate curriculum using multi-step AI reasoning"""
        try:
            if self.agents.agents_available:
                # Use smolagents for advanced reasoning
                prompt = f"""
                Create a comprehensive curriculum for:
                Subject: {subject}
                Level: {level}
                Goals: {goals}

                Use multi-step reasoning to analyze prerequisites, create learning phases, and establish milestones.
                """
                result = self.agents.curriculum_agent.run(prompt)
                return result
            else:
                # Fallback curriculum generation
                return self.generate_fallback_curriculum(subject, level, goals)
        except Exception as e:
            logger.error(f"Curriculum generation error: {e}")
            return f"❌ **Error**: {e}\n\nPlease try again or use the fallback interface."

    def generate_fallback_curriculum(self, subject: str, level: str, goals: str) -> str:
        """Fallback curriculum generation when agents are unavailable"""
        return f"""
# 📚 Learning Curriculum: {subject}

## 🎯 Customized for {level.title()} Level

### 📋 Learning Goals
{goals}

### 🏗️ Structured Learning Path

#### Phase 1: Foundation (Weeks 1-2)
- **Core Concepts**: Introduction to {subject} fundamentals
- **Prerequisites**: Review essential background knowledge
- **Initial Projects**: Hands-on practice exercises

#### Phase 2: Development (Weeks 3-4)
- **Skill Building**: Intermediate concepts and techniques
- **Practical Applications**: Real-world project work
- **Problem Solving**: Guided challenges and exercises

#### Phase 3: Mastery (Weeks 5-6)
- **Advanced Topics**: Complex applications and integrations
- **Portfolio Development**: Showcase projects
- **Knowledge Integration**: Connecting concepts across domains

### 📈 Progress Tracking
- **Weekly Assessments**: Track understanding and skill development
- **Milestone Projects**: Demonstrate cumulative learning
- **Peer Reviews**: Collaborative learning opportunities

### 📚 Recommended Resources
- Online courses and tutorials
- Practice platforms and tools
- Community forums and support groups

### 🎯 Next Steps
Continue to advanced topics or apply skills in specialized areas.

---
*Generated by AI Learning Assistant - Fallback Mode*
"""
    def process_audio_learning_request(self, audio_input) -> str:
        """Process audio input for learning plan generation"""
        if not audio_input:
            return "❌ **Error**: No audio provided"
        try:
            # Save audio from Gradio input
            audio_path = self.save_gradio_audio(audio_input)

            # Process with SambaNova
            result = self.audio_ai.generate_learning_plan_from_audio(audio_path)

            # Cleanup temp file
            self.cleanup_temp_file(audio_path)
            return result
        except Exception as e:
            logger.error(f"Audio processing error: {e}")
            return f"❌ **Audio Processing Failed**: {e}"

    def answer_audio_question(self, audio_input) -> str:
        """Answer questions from audio input"""
        if not audio_input:
            return "❌ **Error**: No audio provided"
        try:
            audio_path = self.save_gradio_audio(audio_input)
            result = self.audio_ai.answer_audio_question(audio_path)
            self.cleanup_temp_file(audio_path)
            return result
        except Exception as e:
            logger.error(f"Audio Q&A error: {e}")
            return f"❌ **Audio Q&A Failed**: {e}"

    def convert_audio_to_text(self, audio_input) -> str:
        """Convert audio to text"""
        if not audio_input:
            return "❌ **Error**: No audio provided"
        try:
            audio_path = self.save_gradio_audio(audio_input)
            result = self.audio_ai.convert_speech_to_text(audio_path)
            self.cleanup_temp_file(audio_path)
            return result
        except Exception as e:
            logger.error(f"Speech-to-text error: {e}")
            return f"❌ **Speech Recognition Failed**: {e}"

    def save_gradio_audio(self, audio_input) -> str:
        """Save Gradio audio input to temporary file"""
        try:
            if isinstance(audio_input, str):
                # Already a file path
                return audio_input
            elif hasattr(audio_input, 'name'):
                # File object
                return audio_input.name
            else:
                # Handle other audio input types
                temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
                temp_file.write(audio_input)
                temp_file.close()
                return temp_file.name
        except Exception as e:
            logger.error(f"Audio saving error: {e}")
            raise

    def cleanup_temp_file(self, file_path: str):
        """Clean up temporary files"""
        try:
            if os.path.exists(file_path) and 'tmp' in file_path:
                os.unlink(file_path)
        except Exception as e:
            logger.warning(f"Cleanup warning: {e}")

    def create_user_profile(self, name: str, learning_style: str, goals: str) -> str:
        """Create and store user learning profile"""
        try:
            with sqlite3.connect(self.db.db_path) as conn:
                cursor = conn.cursor()
                cursor.execute(
                    "INSERT INTO user_profiles (name, learning_style, goals) VALUES (?, ?, ?)",
                    (name, learning_style, goals)
                )
                conn.commit()
            return f"✅ **Profile Created** for {name}\n\n**Learning Style**: {learning_style}\n**Goals**: {goals}"
        except Exception as e:
            return f"❌ **Profile Creation Failed**: {e}"
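
# Illustrative sketch of programmatic (non-Gradio) use of the assistant defined
# above; the argument values are placeholders. Not executed on import.
def _example_programmatic_usage() -> None:
    assistant = LearningAssistant()
    print(assistant.create_user_profile("Ada", "Visual", "Learn Python basics"))
    print(assistant.generate_curriculum_with_multistep_reasoning(
        "Python Programming", "beginner", "Build a small project in 6 weeks"
    ))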
# ============================================================================
# Clean Gradio Interface
# ============================================================================

def create_learning_interface():
    """Create clean, production-ready Gradio interface"""
    # Initialize learning assistant
    learning_assistant = LearningAssistant()

    # Custom CSS for better styling
    custom_css = """
    .gradio-container {
        font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
    }
    .main-header {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 2rem;
        border-radius: 10px;
        text-align: center;
        margin-bottom: 2rem;
    }
    .feature-card {
        background: #f8f9fa;
        border-radius: 10px;
        padding: 1.5rem;
        margin: 1rem 0;
        border-left: 4px solid #667eea;
    }
    .status-indicator {
        padding: 0.5rem;
        border-radius: 5px;
        margin: 0.5rem 0;
    }
    .status-success { background: #d4edda; color: #155724; }
    .status-warning { background: #fff3cd; color: #856404; }
    .status-error { background: #f8d7da; color: #721c24; }
    """
    with gr.Blocks(css=custom_css, title="AI Learning Assistant v2.0") as interface:
        # Header
        gr.HTML("""
        <div class="main-header">
            <h1>🤖 AI-Powered Personal Learning Assistant</h1>
            <p><strong>Version 2.0</strong> - Clean Production Build</p>
            <p>Multi-Agent Reasoning • Voice AI • Real-Time Data • ZeroGPU Optimized</p>
        </div>
        """)

        # System Status
        with gr.Row():
            with gr.Column():
                status_html = f"""
                <div class="feature-card">
                    <h3>🔧 System Status</h3>
                    <div class="status-indicator {'status-success' if SMOLAGENTS_AVAILABLE else 'status-warning'}">
                        🧠 <strong>Smolagents</strong>: {'Available' if SMOLAGENTS_AVAILABLE else 'Fallback Mode'}
                    </div>
                    <div class="status-indicator {'status-success' if SAMBANOVA_AVAILABLE else 'status-warning'}">
                        🎤 <strong>SambaNova Audio AI</strong>: {'Available' if SAMBANOVA_AVAILABLE else 'Not Configured'}
                    </div>
                    <div class="status-indicator {'status-success' if AUDIO_AVAILABLE else 'status-warning'}">
                        🔊 <strong>Audio Processing</strong>: {'Available' if AUDIO_AVAILABLE else 'Limited'}
                    </div>
                    <div class="status-indicator {'status-success' if SPACES_AVAILABLE else 'status-warning'}">
                        ☁️ <strong>HuggingFace Spaces</strong>: {'Available' if SPACES_AVAILABLE else 'Local Mode'}
                    </div>
                </div>
                """
                gr.HTML(status_html)

        # Main Interface Tabs
        with gr.Tabs():
            # Tab 1: Smart Curriculum Generation
            with gr.Tab("📚 Smart Curriculum"):
                gr.HTML('<div class="feature-card"><h3>🧠 AI-Powered Curriculum Generation</h3></div>')
                with gr.Row():
                    with gr.Column():
                        subject_input = gr.Textbox(
                            label="📖 Subject/Topic",
                            placeholder="e.g., Python Programming, Data Science, Machine Learning",
                            lines=1
                        )
                        level_input = gr.Dropdown(
                            choices=["beginner", "intermediate", "advanced"],
                            label="📊 Current Level",
                            value="beginner"
                        )
                        goals_input = gr.Textbox(
                            label="🎯 Learning Goals",
                            placeholder="What do you want to achieve? Any specific timeline?",
                            lines=3
                        )
                        generate_btn = gr.Button(
                            "🚀 Generate Smart Curriculum",
                            variant="primary",
                            size="lg"
                        )
                    with gr.Column():
                        curriculum_output = gr.Markdown(
                            label="Generated Curriculum",
                            value="*Ready to generate personalized curriculum...*"
                        )

                # Event handler
                def generate_curriculum(subject, level, goals):
                    if not all([subject.strip(), level.strip(), goals.strip()]):
                        return "❌ **Error**: Please fill in all fields"
                    try:
                        return learning_assistant.generate_curriculum_with_multistep_reasoning(subject, level, goals)
                    except Exception as e:
                        return f"❌ **Generation Failed**: {str(e)}"

                generate_btn.click(
                    generate_curriculum,
                    inputs=[subject_input, level_input, goals_input],
                    outputs=[curriculum_output]
                )
            # Tab 2: Voice AI Learning
            with gr.Tab("🎤 Voice AI"):
                gr.HTML('<div class="feature-card"><h3>🎵 Voice-Powered Learning with SambaNova</h3></div>')
                with gr.Row():
                    with gr.Column():
                        # Clean Audio component - NO deprecated parameters
                        audio_input = gr.Audio(
                            label="🎤 Record Your Learning Request",
                            type="filepath"
                        )
                        audio_type = gr.Radio(
                            choices=["Learning Plan", "Q&A Answer", "Speech-to-Text"],
                            label="🎯 Processing Type",
                            value="Learning Plan"
                        )
                        process_btn = gr.Button(
                            "🎤 Process with Qwen2-Audio",
                            variant="primary",
                            size="lg"
                        )
                    with gr.Column():
                        audio_output = gr.Markdown(
                            label="AI Audio Response",
                            value="""🎤 **Ready for Voice Processing**

**Instructions:**
1. Click the microphone to record your voice
2. Choose processing type (Learning Plan, Q&A, or Speech-to-Text)
3. Click "Process" to send to Qwen2-Audio-7B-Instruct

*Powered by SambaNova Cloud*"""
                        )

                # Audio processing handler
                def process_audio(audio_file, processing_type):
                    if not audio_file:
                        return "❌ **Error**: No audio provided"
                    try:
                        if processing_type == "Learning Plan":
                            return learning_assistant.process_audio_learning_request(audio_file)
                        elif processing_type == "Q&A Answer":
                            return learning_assistant.answer_audio_question(audio_file)
                        elif processing_type == "Speech-to-Text":
                            return learning_assistant.convert_audio_to_text(audio_file)
                        else:
                            return "❌ **Error**: Invalid processing type"
                    except Exception as e:
                        return f"❌ **Processing Failed**: {str(e)}"

                process_btn.click(
                    process_audio,
                    inputs=[audio_input, audio_type],
                    outputs=[audio_output]
                )
            # Tab 3: User Profile
            with gr.Tab("👤 Profile"):
                gr.HTML('<div class="feature-card"><h3>📝 Create Your Learning Profile</h3></div>')
                with gr.Row():
                    with gr.Column():
                        name_input = gr.Textbox(
                            label="👤 Your Name",
                            placeholder="Enter your name"
                        )
                        style_input = gr.Dropdown(
                            choices=["Visual", "Auditory", "Kinesthetic", "Reading/Writing"],
                            label="🎨 Learning Style",
                            value="Visual"
                        )
                        profile_goals = gr.Textbox(
                            label="🎯 Learning Goals",
                            placeholder="What do you want to learn?",
                            lines=3
                        )
                        create_profile_btn = gr.Button(
                            "✅ Create Profile",
                            variant="primary"
                        )
                    with gr.Column():
                        profile_output = gr.Markdown(
                            label="Profile Status",
                            value="*Ready to create your learning profile...*"
                        )

                create_profile_btn.click(
                    learning_assistant.create_user_profile,
                    inputs=[name_input, style_input, profile_goals],
                    outputs=[profile_output]
                )

        # Footer
        gr.HTML("""
        <div style="text-align: center; margin-top: 2rem; padding: 2rem; background: #f8f9fa; border-radius: 10px;">
            <p><strong>🏆 Gradio Agents & MCP Hackathon 2025</strong></p>
            <p>Multi-Agent AI • Voice Processing • Real-Time Data • ZeroGPU Optimized</p>
            <p style="color: #666; font-size: 0.9rem;">Version 2.0 - Production Ready</p>
        </div>
        """)

    return interface
# ============================================================================
# Application Entry Point
# ============================================================================

if __name__ == "__main__":
    print("🚀 Starting AI Learning Assistant v2.0...")
    print("🔧 Clean Production Build - HuggingFace Spaces Ready")

    # System status logging
    print(f"✅ Smolagents: {'Available' if SMOLAGENTS_AVAILABLE else 'Fallback Mode'}")
    print(f"✅ SambaNova Audio: {'Available' if SAMBANOVA_AVAILABLE else 'Not Configured'}")
    print(f"✅ Audio Processing: {'Available' if AUDIO_AVAILABLE else 'Limited'}")
    print(f"✅ HuggingFace Spaces: {'Available' if SPACES_AVAILABLE else 'Local Mode'}")
    print("🌐 Launching clean interface...")

    # Create and launch interface
    interface = create_learning_interface()

    # Launch with optimal settings for HuggingFace Spaces
    interface.launch(
        share=False,
        show_error=True,
        show_api=False,
        server_name="0.0.0.0",
        server_port=7860
    )