Update src/streamlit_app.py
Browse files- src/streamlit_app.py +51 -14
src/streamlit_app.py
CHANGED
@@ -39,6 +39,12 @@ def load_data():
|
|
39 |
return None, f"Excel file '{path}' not found. Please upload the file."
|
40 |
except Exception as e:
|
41 |
return None, f"Error loading '{path}': {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
42 |
|
43 |
# Enhanced RAM extraction with better parsing
|
44 |
def extract_numeric_ram(ram) -> Optional[int]:
|
@@ -302,20 +308,56 @@ def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int):
|
|
302 |
with col3:
|
303 |
st.markdown(f"*{model['description']}*")
|
304 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
305 |
# Main App
|
306 |
st.title("π§ LLM Compatibility Advisor")
|
307 |
st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")
|
308 |
|
309 |
-
# Load data
|
310 |
df, error = load_data()
|
311 |
|
312 |
-
if error:
|
313 |
-
st.
|
314 |
-
st.info("
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
319 |
st.stop()
|
320 |
|
321 |
# Sidebar filters and info
|
@@ -421,9 +463,8 @@ df_display["Mobile LLM"] = mobile_recommendations
|
|
421 |
df_display["Laptop Tier"] = laptop_tiers
|
422 |
df_display["Mobile Tier"] = mobile_tiers
|
423 |
|
424 |
-
# Filter based on sidebar selections
|
425 |
mask = (laptop_tiers.isin(performance_filter) | mobile_tiers.isin(performance_filter))
|
426 |
-
|
427 |
df_filtered = df_display[mask]
|
428 |
|
429 |
# Display filtered table
|
@@ -604,17 +645,13 @@ with st.expander("π Model Guide & Download Information"):
|
|
604 |
st.markdown("---")
|
605 |
st.markdown("""
|
606 |
### π Essential Download & Deployment Tools
|
607 |
-
|
608 |
**π¦ Easy Model Deployment:**
|
609 |
- [**Ollama**](https://ollama.ai/) β `curl -fsSL https://ollama.ai/install.sh | sh`
|
610 |
- [**LM Studio**](https://lmstudio.ai/) β Drag-and-drop GUI for running models locally
|
611 |
- [**GPT4All**](https://gpt4all.io/) β Cross-platform desktop app for local LLMs
|
612 |
-
|
613 |
**π€ Model Repositories:**
|
614 |
- [**Hugging Face Hub**](https://huggingface.co/models) β Filter by model size, task, and license
|
615 |
- [**TheBloke's Quantizations**](https://huggingface.co/TheBloke) β Pre-quantized models in GGUF/GPTQ format
|
616 |
- [**Awesome LLM**](https://github.com/Hannibal046/Awesome-LLMs) β Curated list of models and resources
|
617 |
-
|
618 |
-
|
619 |
---
|
620 |
""")
|
|
|
39 |
return None, f"Excel file '{path}' not found. Please upload the file."
|
40 |
except Exception as e:
|
41 |
return None, f"Error loading '{path}': {str(e)}"
|
42 |
+
|
43 |
+
# Return success case - this was missing!
|
44 |
+
if combined_df.empty:
|
45 |
+
return None, "No data found in Excel files."
|
46 |
+
else:
|
47 |
+
return combined_df, None
|
48 |
|
49 |
# Enhanced RAM extraction with better parsing
|
50 |
def extract_numeric_ram(ram) -> Optional[int]:
|
|
|
308 |
with col3:
|
309 |
st.markdown(f"*{model['description']}*")
|
310 |
|
311 |
+
# Demo data generator for when Excel files are not available
def generate_demo_data():
    """Generate demo data for testing when Excel files are missing.

    Returns a DataFrame with the same columns the real survey spreadsheets
    carry: Full Name, Laptop RAM, Mobile RAM, and the two OS columns.
    """
    # Eight synthetic respondents, names generated rather than hand-listed.
    names = [f"Demo Student {i}" for i in range(1, 9)]
    laptop_ram = ["8GB", "16GB", "4GB", "32GB", "6GB", "12GB", "2GB", "24GB"]
    mobile_ram = ["4GB", "8GB", "3GB", "12GB", "6GB", "4GB", "2GB", "8GB"]
    laptop_os = [
        "Windows 11", "macOS Monterey", "Ubuntu 22.04", "Windows 10",
        "macOS Big Sur", "Fedora 36", "Windows 11", "macOS Ventura",
    ]
    mobile_os = [
        "Android 13", "iOS 16", "Android 12", "iOS 15",
        "Android 14", "iOS 17", "Android 11", "iOS 16",
    ]
    return pd.DataFrame({
        "Full Name": names,
        "Laptop RAM": laptop_ram,
        "Mobile RAM": mobile_ram,
        "Laptop Operating System": laptop_os,
        "Mobile Operating System": mobile_os,
    })
|
331 |
+
|
332 |
# ---------------- Main App ----------------
st.title("π§ LLM Compatibility Advisor")
st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")

# Load the survey spreadsheets; fall back to demo rows when they are absent.
df, error = load_data()

no_real_data = error or df is None or df.empty
if no_real_data:
    st.warning("β οΈ Excel files not found. Running with demo data for testing.")
    st.info("π To use real data, place 'BITS_INTERNS.xlsx' and 'Summer of AI - ICFAI (Responses) (3).xlsx' in the 'src/' directory.")
    df = generate_demo_data()

    # Document the spreadsheet layout the loader expects.
    with st.expander("π Expected Data Format"):
        st.markdown("""
        The app expects Excel files with the following columns:
        - **Full Name**: Student name
        - **Laptop RAM**: RAM specification (e.g., "8GB", "16 GB", "8192MB")
        - **Mobile RAM**: Mobile device RAM
        - **Laptop Operating System**: OS name
        - **Mobile Operating System**: Mobile OS name
        """)

# Halt early unless the (possibly demo) frame carries the columns the app needs.
required_columns = ["Full Name", "Laptop RAM", "Mobile RAM"]
missing_columns = [col for col in required_columns if col not in df.columns]

if missing_columns:
    st.error(f"Missing required columns: {missing_columns}")
    st.info("Please ensure your Excel file contains the required columns.")
    st.stop()
|
362 |
|
363 |
# Sidebar filters and info
|
|
|
463 |
# Record each respondent's computed performance tier alongside the picks.
df_display["Laptop Tier"] = laptop_tiers
df_display["Mobile Tier"] = mobile_tiers

# Filter based on sidebar selections: keep a row when EITHER device's tier
# matches one of the tiers chosen in the sidebar.
laptop_match = laptop_tiers.isin(performance_filter)
mobile_match = mobile_tiers.isin(performance_filter)
mask = laptop_match | mobile_match
df_filtered = df_display[mask]
|
469 |
|
470 |
# Display filtered table
|
|
|
645 |
st.markdown("---")
|
646 |
st.markdown("""
|
647 |
### π Essential Download & Deployment Tools
|
|
|
648 |
**π¦ Easy Model Deployment:**
|
649 |
- [**Ollama**](https://ollama.ai/) β `curl -fsSL https://ollama.ai/install.sh | sh`
|
650 |
- [**LM Studio**](https://lmstudio.ai/) β Drag-and-drop GUI for running models locally
|
651 |
- [**GPT4All**](https://gpt4all.io/) β Cross-platform desktop app for local LLMs
|
|
|
652 |
**π€ Model Repositories:**
|
653 |
- [**Hugging Face Hub**](https://huggingface.co/models) β Filter by model size, task, and license
|
654 |
- [**TheBloke's Quantizations**](https://huggingface.co/TheBloke) β Pre-quantized models in GGUF/GPTQ format
|
655 |
- [**Awesome LLM**](https://github.com/Hannibal046/Awesome-LLMs) β Curated list of models and resources
|
|
|
|
|
656 |
---
|
657 |
""")
|