Update src/streamlit_app.py
src/streamlit_app.py  (+80 −44)
@@ -8,6 +8,7 @@ Requirements: streamlit, pandas, plotly, openpyxl
 
 import streamlit as st
 import pandas as pd
+import numpy as np
 import re
 import plotly.express as px
 import plotly.graph_objects as go
@@ -329,6 +330,30 @@ def generate_demo_data():
     }
     return pd.DataFrame(demo_data)
 
+# Function to safely prepare user options
+def prepare_user_options(df):
+    """Safely prepare user options for selectbox, handling NaN values and mixed types"""
+    try:
+        # Get unique names and filter out NaN values
+        unique_names = df["Full Name"].dropna().unique()
+
+        # Convert to strings and filter out any remaining non-string values
+        valid_names = []
+        for name in unique_names:
+            try:
+                str_name = str(name).strip()
+                if str_name and str_name.lower() != 'nan':
+                    valid_names.append(str_name)
+            except:
+                continue
+
+        # Create options list with proper string concatenation
+        options = ["Select a student..."] + sorted(valid_names)
+        return options
+    except Exception as e:
+        st.error(f"Error preparing user options: {e}")
+        return ["Select a student..."]
+
 # Main App
 st.title("🧠 LLM Compatibility Advisor")
 st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")
@@ -360,6 +385,10 @@ if missing_columns:
     st.info("Please ensure your Excel file contains the required columns.")
     st.stop()
 
+# Clean the dataframe
+df = df.copy()
+df["Full Name"] = df["Full Name"].astype(str).str.strip()
+
 # Sidebar filters and info
 with st.sidebar:
     st.header("๐ Filters & Info")
@@ -393,57 +422,64 @@ with st.sidebar:
     if not pd.isna(avg_mobile_ram):
         st.metric("Avg Mobile RAM", f"{avg_mobile_ram:.1f} GB")
 
-# User selection with search
+# User selection with search - FIXED VERSION
 st.subheader("👤 Individual Student Analysis")
+
+# Prepare options safely
+user_options = prepare_user_options(df)
+
 selected_user = st.selectbox(
     "Choose a student:",
-    options=…
+    options=user_options,
+    index=0  # Default to first option ("Select a student...")
 )
 
-if selected_user:
-    …
-    with col1:
-        st.markdown("### 💻 Laptop Configuration")
-        laptop_os_icon, laptop_os_name = get_os_info(user_data.get('Laptop Operating System'))
-        laptop_ram = user_data.get('Laptop RAM', 'Not specified')
-        laptop_rec, laptop_tier, laptop_info, laptop_models = recommend_llm(laptop_ram)
-        laptop_ram_gb = extract_numeric_ram(laptop_ram) or 0
-
-        st.markdown(f"**OS:** {laptop_os_icon} {laptop_os_name}")
-        st.markdown(f"**RAM:** {laptop_ram}")
-        st.markdown(f"**Performance Tier:** {laptop_tier}")
-
-        st.…
-    …
+if selected_user and selected_user != "Select a student...":
+    # Find user data with safe lookup
+    user_data_mask = df["Full Name"].astype(str).str.strip() == selected_user
+    if user_data_mask.any():
+        user_data = df[user_data_mask].iloc[0]
+
+        # Enhanced user display
+        col1, col2 = st.columns(2)
+
+        with col1:
+            st.markdown("### 💻 Laptop Configuration")
+            laptop_os_icon, laptop_os_name = get_os_info(user_data.get('Laptop Operating System'))
+            laptop_ram = user_data.get('Laptop RAM', 'Not specified')
+            laptop_rec, laptop_tier, laptop_info, laptop_models = recommend_llm(laptop_ram)
+            laptop_ram_gb = extract_numeric_ram(laptop_ram) or 0
+
+            st.markdown(f"**OS:** {laptop_os_icon} {laptop_os_name}")
+            st.markdown(f"**RAM:** {laptop_ram}")
+            st.markdown(f"**Performance Tier:** {laptop_tier}")
+
+            st.success(f"**💡 Recommendation:** {laptop_rec}")
+            st.info(f"**ℹ️ Notes:** {laptop_info}")
+
+            # Display detailed models for laptop
+            if laptop_models:
+                filtered_models = {k: v for k, v in laptop_models.items() if k in show_categories}
+                display_model_categories(filtered_models, laptop_ram_gb)
+
+        with col2:
+            st.markdown("### 📱 Mobile Configuration")
+            mobile_os_icon, mobile_os_name = get_os_info(user_data.get('Mobile Operating System'))
+            mobile_ram = user_data.get('Mobile RAM', 'Not specified')
+            mobile_rec, mobile_tier, mobile_info, mobile_models = recommend_llm(mobile_ram)
+            mobile_ram_gb = extract_numeric_ram(mobile_ram) or 0
+
+            st.markdown(f"**OS:** {mobile_os_icon} {mobile_os_name}")
+            st.markdown(f"**RAM:** {mobile_ram}")
+            st.markdown(f"**Performance Tier:** {mobile_tier}")
+
+            st.success(f"**💡 Recommendation:** {mobile_rec}")
+            st.info(f"**ℹ️ Notes:** {mobile_info}")
+
+            # Display detailed models for mobile
+            if mobile_models:
+                filtered_models = {k: v for k, v in mobile_models.items() if k in show_categories}
+                display_model_categories(filtered_models, mobile_ram_gb)
 
 # Batch Analysis Section
 st.markdown("---")
@@ -589,7 +625,7 @@ with st.expander("๐ Model Guide & Download Information"):
 - **Zephyr**: HuggingFace's chat specialist
 - **OpenChat**: High-quality conversation models
 - **Neural-Chat**: Intel-optimized chat models
-
+
 ### 🧮 **Reasoning Masters**
 - **WizardMath**: Mathematical problem solving
 - **MetaMath**: Advanced arithmetic reasoning
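
For reference, a minimal standalone sketch of the behaviour the new `prepare_user_options` helper is meant to provide. The sample frame and the inline restatement of the cleanup steps are illustrative only (not part of the commit), but they show why dropping NaN values, stripping whitespace, and sorting the names matters before they reach `st.selectbox`:

import pandas as pd

# Hypothetical sample: "Full Name" mixes padding, duplicates and missing values.
df = pd.DataFrame({"Full Name": ["  Alice  ", "Bob", None, "Bob", float("nan")]})

# Same steps the helper applies: drop NaN, de-duplicate, stringify, strip, filter, sort.
names = [str(n).strip() for n in df["Full Name"].dropna().unique()]
options = ["Select a student..."] + sorted(n for n in names if n and n.lower() != "nan")

print(options)  # ['Select a student...', 'Alice', 'Bob']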