qwerty45-uiop committed on
Commit
4f33339
Β·
verified Β·
1 Parent(s): be4592f

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +70 -31
src/streamlit_app.py CHANGED
@@ -1,7 +1,7 @@
1
  #!/usr/bin/env python3
2
  """
3
  LLM Compatibility Advisor - Streamlined with Download Sizes
4
- Author: Assistant2
5
  Description: Provides device-based LLM recommendations with popular models and download sizes
6
  Requirements: streamlit, pandas, plotly, openpyxl
7
  """
@@ -21,30 +21,35 @@ st.set_page_config(
21
  initial_sidebar_state="expanded"
22
  )
23
 
24
- # Enhanced data loading with error handling
25
  @st.cache_data
26
  def load_data():
27
  try:
28
- # Use actual file paths
29
- df1 = pd.read_excel("src/BITS_INTERNS.xlsx", sheet_name="Form Responses 1")
30
- df2 = pd.read_excel("/mnt/data/Summer of AI - ICFAI (Responses) (3).xlsx")
31
-
32
- df1.columns = df1.columns.str.strip()
33
- df2.columns = df2.columns.str.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- # Find common columns and combine
36
- common_cols = list(set(df1.columns) & set(df2.columns))
37
- df1 = df1[common_cols]
38
- df2 = df2[common_cols]
39
-
40
- combined_df = pd.concat([df1, df2], ignore_index=True)
41
- return combined_df, None
42
- except FileNotFoundError as e:
43
- return None, f"File not found: {e}"
44
  except Exception as e:
45
  return None, f"Error loading data: {str(e)}"
46
 
47
-
48
  # Enhanced RAM extraction with better parsing
49
  def extract_numeric_ram(ram) -> Optional[int]:
50
  if pd.isna(ram):
@@ -307,20 +312,58 @@ def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int):
307
  with col3:
308
  st.markdown(f"*{model['description']}*")
309
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
  # Main App
311
  st.title("🧠 LLM Compatibility Advisor")
312
  st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")
313
 
314
- # Load data
315
  df, error = load_data()
316
 
317
- if error:
318
- st.error(error)
319
- st.info("Please ensure the Excel file 'BITS_INTERNS.xlsx' is in the same directory as this script.")
320
- st.stop()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
321
 
322
  if df is None or df.empty:
323
- st.error("No data found in the Excel file.")
324
  st.stop()
325
 
326
  # Sidebar filters and info
@@ -426,7 +469,7 @@ df_display["Mobile LLM"] = mobile_recommendations
426
  df_display["Laptop Tier"] = laptop_tiers
427
  df_display["Mobile Tier"] = mobile_tiers
428
 
429
- # Filter based on sidebar selections (RAM range filter removed)
430
  mask = (laptop_tiers.isin(performance_filter) | mobile_tiers.isin(performance_filter))
431
 
432
  df_filtered = df_display[mask]
@@ -481,7 +524,7 @@ with explorer_col1:
481
  selected_ram_range = st.selectbox(
482
  "Select RAM range to explore models:",
483
  ["≀2GB (Ultra Low)", "3-4GB (Low)", "5-6GB (Moderate-Low)",
484
- "7-8GB (Moderate)", "9-16GB (Good)", "17-32GB (High)", ">32GB (Ultra High)"]
485
  )
486
 
487
  with explorer_col2:
@@ -591,7 +634,7 @@ with st.expander("πŸ“˜ Model Guide & Download Information"):
591
  - Consider bandwidth and storage before downloading
592
  - Start with 4-bit quantized versions for testing
593
  - Use `ollama pull model_name` for easiest setup
594
-
595
  ## πŸ”§ Optimization Strategies
596
 
597
  ### **Memory Reduction**
@@ -609,17 +652,13 @@ with st.expander("πŸ“˜ Model Guide & Download Information"):
609
  st.markdown("---")
610
  st.markdown("""
611
  ### πŸ”— Essential Download & Deployment Tools
612
-
613
  **πŸ“¦ Easy Model Deployment:**
614
  - [**Ollama**](https://ollama.ai/) – `curl -fsSL https://ollama.ai/install.sh | sh`
615
  - [**LM Studio**](https://lmstudio.ai/) – Drag-and-drop GUI for running models locally
616
  - [**GPT4All**](https://gpt4all.io/) – Cross-platform desktop app for local LLMs
617
-
618
  **πŸ€— Model Repositories:**
619
  - [**Hugging Face Hub**](https://huggingface.co/models) – Filter by model size, task, and license
620
  - [**TheBloke's Quantizations**](https://huggingface.co/TheBloke) – Pre-quantized models in GGUF/GPTQ format
621
  - [**Awesome LLM**](https://github.com/Hannibal046/Awesome-LLMs) – Curated list of models and resources
622
-
623
-
624
  ---
625
  """)
 
1
  #!/usr/bin/env python3
2
  """
3
  LLM Compatibility Advisor - Streamlined with Download Sizes
4
+ Author: Assistant
5
  Description: Provides device-based LLM recommendations with popular models and download sizes
6
  Requirements: streamlit, pandas, plotly, openpyxl
7
  """
 
21
  initial_sidebar_state="expanded"
22
  )
23
 
24
# Enhanced data loading with error handling - FIXED FILE PATH
@st.cache_data
def load_data():
    """Load the intern survey spreadsheet from a known location.

    Probes a short list of candidate paths for 'BITS_INTERNS.xlsx' and reads
    its 'Form Responses 1' sheet.

    Returns:
        tuple: (DataFrame, None) on success, or (None, error_message) when the
        file is missing or cannot be read.
    """
    try:
        # Candidate locations, most likely first.
        # NOTE: "./BITS_INTERNS.xlsx" was dropped — it resolves to the same
        # file as "BITS_INTERNS.xlsx", so probing it twice was redundant.
        possible_paths = [
            "BITS_INTERNS.xlsx",      # current working directory
            "src/BITS_INTERNS.xlsx",  # src subdirectory
        ]

        df = None
        for path in possible_paths:
            try:
                df = pd.read_excel(path, sheet_name="Form Responses 1")
                break
            except FileNotFoundError:
                continue
        # Deliberately no st.info() here: UI calls inside an @st.cache_data
        # function execute only on cache misses, so the message would appear
        # on the first run and silently vanish on every rerun.

        if df is None:
            return None, "Excel file 'BITS_INTERNS.xlsx' not found in any expected location. Please ensure the file is in the same directory as this script."

        # Google Forms exports often carry stray whitespace in headers.
        df.columns = df.columns.str.strip()
        return df, None
    except Exception as e:
        # Any other read failure (wrong sheet name, corrupt workbook, ...).
        return None, f"Error loading data: {str(e)}"
52
 
 
53
  # Enhanced RAM extraction with better parsing
54
  def extract_numeric_ram(ram) -> Optional[int]:
55
  if pd.isna(ram):
 
312
  with col3:
313
  st.markdown(f"*{model['description']}*")
314
 
315
# File upload fallback
def show_file_upload():
    """Show file upload option when Excel file is not found"""
    st.warning("πŸ“ Excel file not found. Please upload your data file:")
    upload = st.file_uploader(
        "Upload BITS_INTERNS.xlsx",
        type=['xlsx', 'xls'],
        help="Upload the Excel file containing student data",
    )

    # No file chosen yet -> keep prompting the user.
    if upload is None:
        return None, "Please upload the Excel file to continue."

    try:
        frame = pd.read_excel(upload, sheet_name="Form Responses 1")
        frame.columns = frame.columns.str.strip()
        st.success("βœ… File uploaded successfully!")
        return frame, None
    except Exception as e:
        return None, f"Error reading uploaded file: {str(e)}"
335
+
336
# Main App
st.title("🧠 LLM Compatibility Advisor")
st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")

# Load data with fallback to file upload
df, error = load_data()

if df is None and error:
    # Primary load failed -> offer a manual upload instead.
    df, fallback_error = show_file_upload()
    if fallback_error:
        st.error(fallback_error)
        st.info("πŸ“‹ **To fix this issue:**\n1. Ensure 'BITS_INTERNS.xlsx' is in the same directory as this script\n2. Or use the file upload option above\n3. Check that the Excel file has a sheet named 'Form Responses 1'")

        # Last resort: a small built-in sample keeps the app explorable.
        if st.button("πŸš€ Try Demo Mode"):
            df = pd.DataFrame({
                "Full Name": ["John Doe", "Jane Smith", "Alex Johnson", "Sarah Wilson"],
                "Laptop RAM": ["8GB", "16GB", "4GB", "32GB"],
                "Mobile RAM": ["6GB", "8GB", "4GB", "12GB"],
                "Laptop Operating System": ["Windows 11", "macOS", "Ubuntu Linux", "Windows 10"],
                "Mobile Operating System": ["Android", "iOS", "Android", "iOS"],
            })
            st.success("πŸ”₯ Demo mode activated! Using sample data.")
        else:
            st.stop()

# Nothing usable from any source -> halt the script run here.
if df is None or df.empty:
    st.error("No data found in the file.")
    st.stop()
368
 
369
  # Sidebar filters and info
 
469
  df_display["Laptop Tier"] = laptop_tiers
470
  df_display["Mobile Tier"] = mobile_tiers
471
 
472
+ # Filter based on sidebar selections
473
  mask = (laptop_tiers.isin(performance_filter) | mobile_tiers.isin(performance_filter))
474
 
475
  df_filtered = df_display[mask]
 
524
  selected_ram_range = st.selectbox(
525
  "Select RAM range to explore models:",
526
  ["≀2GB (Ultra Low)", "3-4GB (Low)", "5-6GB (Moderate-Low)",
527
+ "7-8GB (Moderate)", "9-16GB (Good)", "17-32GB (High)", ">32GB (Ultra High)"]
528
  )
529
 
530
  with explorer_col2:
 
634
  - Consider bandwidth and storage before downloading
635
  - Start with 4-bit quantized versions for testing
636
  - Use `ollama pull model_name` for easiest setup
637
+
638
  ## πŸ”§ Optimization Strategies
639
 
640
  ### **Memory Reduction**
 
652
  st.markdown("---")
653
  st.markdown("""
654
  ### πŸ”— Essential Download & Deployment Tools
 
655
  **πŸ“¦ Easy Model Deployment:**
656
  - [**Ollama**](https://ollama.ai/) – `curl -fsSL https://ollama.ai/install.sh | sh`
657
  - [**LM Studio**](https://lmstudio.ai/) – Drag-and-drop GUI for running models locally
658
  - [**GPT4All**](https://gpt4all.io/) – Cross-platform desktop app for local LLMs
 
659
  **πŸ€— Model Repositories:**
660
  - [**Hugging Face Hub**](https://huggingface.co/models) – Filter by model size, task, and license
661
  - [**TheBloke's Quantizations**](https://huggingface.co/TheBloke) – Pre-quantized models in GGUF/GPTQ format
662
  - [**Awesome LLM**](https://github.com/Hannibal046/Awesome-LLMs) – Curated list of models and resources
 
 
663
  ---
664
  """)