import logging

import gradio as gr

from app import SentimentApp
from config import config
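
# Project-local modules (assumed layout): `config` is expected to expose the
# SUPPORTED_LANGUAGES and THEMES mappings, and `SentimentApp` (app.py) the
# analysis, history and data-handler methods wired into the interface below.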

# Optimized Gradio Interface
def create_interface():
    """Create comprehensive Gradio interface with optimizations"""
    app = SentimentApp()

    with gr.Blocks(theme=gr.themes.Soft(), title="Multilingual Sentiment Analyzer") as demo:
        gr.Markdown("# Multilingual Sentiment Analyzer")
        gr.Markdown("AI-powered sentiment analysis with SHAP & LIME explainable AI features")

        with gr.Tab("Single Analysis"):
            with gr.Row():
                with gr.Column():
                    text_input = gr.Textbox(
                        label="Enter Text for Analysis",
                        placeholder="Enter your text in any supported language...",
                        lines=5
                    )
                    with gr.Row():
                        language_selector = gr.Dropdown(
                            choices=list(config.SUPPORTED_LANGUAGES.values()),
                            value="Auto Detect",
                            label="Language"
                        )
                        theme_selector = gr.Dropdown(
                            choices=list(config.THEMES.keys()),
                            value="default",
                            label="Theme"
                        )
                    with gr.Row():
                        clean_text_cb = gr.Checkbox(label="Clean Text", value=False)
                        remove_punct_cb = gr.Checkbox(label="Remove Punctuation", value=False)
                        remove_nums_cb = gr.Checkbox(label="Remove Numbers", value=False)
                    analyze_btn = gr.Button("Analyze", variant="primary", size="lg")
                    gr.Examples(
                        examples=app.examples,
                        inputs=text_input,
                        cache_examples=False
                    )

                with gr.Column():
                    result_output = gr.Textbox(label="Analysis Results", lines=8)
                    with gr.Row():
                        gauge_plot = gr.Plot(label="Sentiment Gauge")
                        probability_plot = gr.Plot(label="Probability Distribution")

        # FIXED Advanced Analysis Tab
        with gr.Tab("Advanced Analysis"):
            gr.Markdown("## Explainable AI Analysis")
            gr.Markdown("**SHAP and LIME analysis with FIXED implementation** - now handles text input correctly!")

            with gr.Row():
                with gr.Column():
                    advanced_text_input = gr.Textbox(
                        label="Enter Text for Advanced Analysis",
                        placeholder="Enter text to analyze with SHAP and LIME...",
                        lines=6,
                        value="This movie is absolutely fantastic and amazing!"
                    )
                    with gr.Row():
                        advanced_language = gr.Dropdown(
                            choices=list(config.SUPPORTED_LANGUAGES.values()),
                            value="Auto Detect",
                            label="Language"
                        )
                        num_samples_slider = gr.Slider(
                            minimum=50,
                            maximum=300,
                            value=100,
                            step=25,
                            label="Number of Samples",
                            info="Lower = Faster, Higher = More Accurate"
                        )
                    with gr.Row():
                        shap_btn = gr.Button("SHAP Analysis", variant="primary")
                        lime_btn = gr.Button("LIME Analysis", variant="secondary")
                    gr.Markdown("""
                        **Analysis Methods:**
                        - **SHAP**: Token-level importance scores using Text masker
                        - **LIME**: Feature importance through text perturbation

                        **Expected Performance:**
                        - 50 samples: ~10-20s | 100 samples: ~20-40s | 200+ samples: ~40-80s
                    """)

                with gr.Column():
                    advanced_results = gr.Textbox(label="Analysis Summary", lines=12)
                    with gr.Row():
                        advanced_plot = gr.Plot(label="Feature Importance Visualization")
with gr.Tab("Batch Analysis"):
with gr.Row():
with gr.Column():
file_upload = gr.File(
label="Upload File (CSV/TXT)",
file_types=[".csv", ".txt"]
)
batch_input = gr.Textbox(
label="Batch Input (one text per line)",
placeholder="Enter multiple texts, one per line...",
lines=10
)
with gr.Row():
batch_language = gr.Dropdown(
choices=list(config.SUPPORTED_LANGUAGES.values()),
value="Auto Detect",
label="Language"
)
batch_theme = gr.Dropdown(
choices=list(config.THEMES.keys()),
value="default",
label="Theme"
)
with gr.Row():
batch_clean_cb = gr.Checkbox(label="Clean Text", value=False)
batch_punct_cb = gr.Checkbox(label="Remove Punctuation", value=False)
batch_nums_cb = gr.Checkbox(label="Remove Numbers", value=False)
with gr.Row():
load_file_btn = gr.Button("Load File")
analyze_batch_btn = gr.Button("Analyze Batch", variant="primary")
with gr.Column():
batch_summary = gr.Textbox(label="Batch Summary", lines=8)
batch_results_df = gr.Dataframe(
label="Detailed Results",
headers=["Index", "Text", "Sentiment", "Confidence", "Language", "Word_Count"],
datatype=["number", "str", "str", "str", "str", "number"]
)
with gr.Row():
batch_plot = gr.Plot(label="Batch Analysis Summary")
confidence_dist_plot = gr.Plot(label="Confidence Distribution")
with gr.Tab("History & Analytics"):
with gr.Row():
with gr.Column():
with gr.Row():
refresh_history_btn = gr.Button("Refresh History")
clear_history_btn = gr.Button("Clear History", variant="stop")
status_btn = gr.Button("Get Status")
history_theme = gr.Dropdown(
choices=list(config.THEMES.keys()),
value="default",
label="Dashboard Theme"
)
with gr.Row():
export_csv_btn = gr.Button("Export CSV")
export_json_btn = gr.Button("Export JSON")
with gr.Column():
history_status = gr.Textbox(label="History Status", lines=8)
history_dashboard = gr.Plot(label="History Analytics Dashboard")
with gr.Row():
csv_download = gr.File(label="CSV Download", visible=True)
json_download = gr.File(label="JSON Download", visible=True)

        # Event Handlers
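        # The wiring below assumes SentimentApp exposes handlers whose signatures
        # match the inputs/outputs lists, roughly (inferred from this file, not
        # from the authoritative definitions in app.py):
        #   analyze_single(text, language, theme, clean, remove_punct, remove_nums)
        #       -> (summary_text, gauge_fig, probability_fig)
        #   analyze_with_shap(text, language, num_samples) -> (summary_text, fig)
        #   analyze_with_lime(text, language, num_samples) -> (summary_text, fig)
        #   analyze_batch(texts, language, theme, clean, remove_punct, remove_nums)
        #       -> (summary_text, results_df, summary_fig, confidence_fig)
        #   data_handler.process_file(file) -> newline-separated texts for batch_input
        #   data_handler.export_data(entries, fmt) -> (file_path, status_message)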

        # Single Analysis
        analyze_btn.click(
            app.analyze_single,
            inputs=[text_input, language_selector, theme_selector,
                    clean_text_cb, remove_punct_cb, remove_nums_cb],
            outputs=[result_output, gauge_plot, probability_plot]
        )

        # FIXED Advanced Analysis with sample size control
        shap_btn.click(
            app.analyze_with_shap,
            inputs=[advanced_text_input, advanced_language, num_samples_slider],
            outputs=[advanced_results, advanced_plot]
        )
        lime_btn.click(
            app.analyze_with_lime,
            inputs=[advanced_text_input, advanced_language, num_samples_slider],
            outputs=[advanced_results, advanced_plot]
        )

        # Batch Analysis
        load_file_btn.click(
            app.data_handler.process_file,
            inputs=file_upload,
            outputs=batch_input
        )
        analyze_batch_btn.click(
            app.analyze_batch,
            inputs=[batch_input, batch_language, batch_theme,
                    batch_clean_cb, batch_punct_cb, batch_nums_cb],
            outputs=[batch_summary, batch_results_df, batch_plot, confidence_dist_plot]
        )

        # History & Analytics
        refresh_history_btn.click(
            app.plot_history,
            inputs=history_theme,
            outputs=[history_dashboard, history_status]
        )
        clear_history_btn.click(
            lambda: f"Cleared {app.history.clear()} entries",
            outputs=history_status
        )
        status_btn.click(
            app.get_history_status,
            outputs=history_status
        )
        export_csv_btn.click(
            lambda: app.data_handler.export_data(app.history.get_all(), 'csv'),
            outputs=[csv_download, history_status]
        )
        export_json_btn.click(
            lambda: app.data_handler.export_data(app.history.get_all(), 'json'),
            outputs=[json_download, history_status]
        )

    return demo
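

# Note: the SHAP/LIME handlers can run for tens of seconds, so it may help to
# enable Gradio's request queue before launching (a sketch; adjust to the
# installed Gradio version):
#
#     demo = create_interface()
#     demo.queue()   # queues long-running explainability requests
#     demo.launch()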

# Application Entry Point
if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    try:
        demo = create_interface()
        demo.launch(
            share=True,             # also request a temporary public share link
            server_name="0.0.0.0",  # listen on all interfaces (containers / Spaces)
            server_port=7860,       # Gradio's default port
            show_error=True         # surface handler exceptions in the UI
        )
    except Exception as e:
        logging.error(f"Failed to launch application: {e}")
        raise