qwerty45-uiop committed on
Commit
b5a1a53
·
verified ·
1 Parent(s): dc8478e

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +237 -138
src/streamlit_app.py CHANGED
@@ -1,8 +1,8 @@
1
  #!/usr/bin/env python3
2
  """
3
- LLM Compatibility Advisor - Streamlined with Download Sizes
4
  Author: Assistant
5
- Description: Provides device-based LLM recommendations with popular models and download sizes
6
  Requirements: streamlit, pandas, plotly, openpyxl
7
  """
8
 
@@ -13,10 +13,11 @@ import re
13
  import plotly.express as px
14
  import plotly.graph_objects as go
15
  from typing import Optional, Tuple, List, Dict
 
16
 
17
  # ✅ MUST be the first Streamlit command
18
  st.set_page_config(
19
- page_title="LLM Compatibility Advisor",
20
  layout="wide",
21
  page_icon="🧠",
22
  initial_sidebar_state="expanded"
@@ -41,7 +42,6 @@ def load_data():
41
  except Exception as e:
42
  return None, f"Error loading '{path}': {str(e)}"
43
 
44
- # Return success case - this was missing!
45
  if combined_df.empty:
46
  return None, "No data found in Excel files."
47
  else:
@@ -71,161 +71,221 @@ def extract_numeric_ram(ram) -> Optional[int]:
71
 
72
  return None
73
 
74
- # Streamlined LLM database with popular models and download sizes
75
- QUANTIZATION_INFO = {
76
- "fp16": {"multiplier": 1.0, "label": "FP16 (Full)", "description": "Best quality, largest size"},
77
- "8bit": {"multiplier": 0.5, "label": "8-bit", "description": "Good quality, 50% smaller"},
78
- "4bit": {"multiplier": 0.25, "label": "4-bit", "description": "Decent quality, 75% smaller"}
 
79
  }
80
 
81
- def calculate_quantized_size(base_size_str: str, quantization: str) -> str:
82
- """Calculate quantized model size based on base size and quantization type"""
83
- try:
84
- import re
85
- match = re.match(r'(\d+(?:\.\d+)?)\s*(GB|MB)', base_size_str.upper())
86
- if not match:
87
- return base_size_str
88
-
89
- value, unit = float(match.group(1)), match.group(2)
90
- multiplier = QUANTIZATION_INFO[quantization]["multiplier"]
91
-
92
- new_value = value * multiplier
93
-
94
- if unit == "MB" and new_value >= 1024:
95
- new_value = new_value / 1024
96
- unit = "GB"
97
- elif unit == "GB" and new_value < 1:
98
- new_value = new_value * 1024
99
- unit = "MB"
100
-
101
- return f"{new_value:.0f}{unit}" if new_value >= 10 else f"{new_value:.1f}{unit}"
102
- except:
103
  return base_size_str
104
 
105
- def get_quantization_recommendations(ram_gb: int) -> List[str]:
106
- """Recommend best quantization options based on available RAM"""
107
- if ram_gb <= 2:
108
- return ["4bit"]
109
- elif ram_gb <= 4:
110
- return ["4bit", "8bit"]
111
- elif ram_gb <= 8:
112
- return ["4bit", "8bit"]
113
- elif ram_gb <= 16:
114
- return ["8bit", "fp16"]
115
- else:
116
- return ["fp16", "8bit", "4bit"]
117
-
118
  LLM_DATABASE = {
119
  "ultra_low": { # ≀2GB
120
  "general": [
121
- {"name": "TinyLlama-1.1B-Chat", "size": "637MB", "description": "Compact chat model"},
122
- {"name": "DistilBERT-base", "size": "268MB", "description": "Efficient BERT variant"},
123
- {"name": "all-MiniLM-L6-v2", "size": "91MB", "description": "Sentence embeddings"}
 
124
  ],
125
  "code": [
126
- {"name": "CodeT5-small", "size": "242MB", "description": "Code generation"},
127
- {"name": "Replit-code-v1-3B", "size": "1.2GB", "description": "Code completion"}
128
  ]
129
  },
130
  "low": { # 3-4GB
131
  "general": [
132
- {"name": "Phi-1.5", "size": "2.8GB", "description": "Microsoft's efficient model"},
133
- {"name": "Gemma-2B", "size": "1.4GB", "description": "Google's compact model"},
134
- {"name": "OpenLLaMA-3B", "size": "2.1GB", "description": "Open source LLaMA"}
 
135
  ],
136
  "code": [
137
- {"name": "CodeGen-2B", "size": "1.8GB", "description": "Salesforce code model"},
138
- {"name": "StarCoder-1B", "size": "1.1GB", "description": "BigCode project"}
139
  ],
140
  "chat": [
141
- {"name": "Alpaca-3B", "size": "2.0GB", "description": "Stanford's instruction model"},
142
- {"name": "Vicuna-3B", "size": "2.1GB", "description": "ChatGPT-style training"}
143
  ]
144
  },
145
  "moderate_low": { # 5-6GB
146
  "general": [
147
- {"name": "Phi-2", "size": "5.2GB", "description": "Microsoft's 2.7B model"},
148
- {"name": "Gemma-7B-it", "size": "4.2GB", "description": "Google instruction tuned"},
149
- {"name": "Mistral-7B-v0.1", "size": "4.1GB", "description": "Mistral AI base model"}
 
150
  ],
151
  "code": [
152
- {"name": "CodeLlama-7B", "size": "3.8GB", "description": "Meta's code specialist"},
153
- {"name": "StarCoder-7B", "size": "4.0GB", "description": "Code generation expert"}
154
  ],
155
  "chat": [
156
- {"name": "Zephyr-7B-beta", "size": "4.2GB", "description": "HuggingFace chat model"},
157
- {"name": "Neural-Chat-7B", "size": "4.1GB", "description": "Intel optimized"}
158
  ]
159
  },
160
  "moderate": { # 7-8GB
161
  "general": [
162
- {"name": "Llama-2-7B-Chat", "size": "3.5GB", "description": "Meta's popular chat model"},
163
- {"name": "Mistral-7B-Instruct-v0.2", "size": "4.1GB", "description": "Latest Mistral instruct"},
164
- {"name": "Qwen-7B-Chat", "size": "4.0GB", "description": "Alibaba's multilingual"}
 
165
  ],
166
  "code": [
167
- {"name": "CodeLlama-7B-Instruct", "size": "3.8GB", "description": "Instruction-tuned CodeLlama"},
168
- {"name": "WizardCoder-7B", "size": "4.0GB", "description": "Enhanced coding abilities"},
169
- {"name": "Phind-CodeLlama-34B-v2", "size": "4.2GB", "description": "4-bit quantized version"}
170
  ],
171
  "reasoning": [
172
- {"name": "WizardMath-7B", "size": "4.0GB", "description": "Mathematical reasoning"},
173
- {"name": "MetaMath-7B", "size": "3.9GB", "description": "Math problem solving"}
174
  ]
175
  },
176
  "good": { # 9-16GB
177
  "general": [
178
- {"name": "Llama-2-13B-Chat", "size": "7.3GB", "description": "Larger Llama variant"},
179
- {"name": "Vicuna-13B-v1.5", "size": "7.2GB", "description": "Enhanced Vicuna"},
180
- {"name": "OpenChat-3.5", "size": "7.1GB", "description": "High-quality chat model"}
 
181
  ],
182
  "code": [
183
- {"name": "CodeLlama-13B-Instruct", "size": "7.3GB", "description": "Larger code model"},
184
- {"name": "WizardCoder-15B", "size": "8.2GB", "description": "Advanced coding"},
185
- {"name": "StarCoder-15B", "size": "8.5GB", "description": "Large code model"}
186
  ],
187
  "multimodal": [
188
- {"name": "LLaVA-7B", "size": "7.0GB", "description": "Vision + language"},
189
- {"name": "MiniGPT-4-7B", "size": "6.8GB", "description": "Multimodal chat"}
 
190
  ],
191
  "reasoning": [
192
- {"name": "WizardMath-13B", "size": "7.3GB", "description": "Advanced math"},
193
- {"name": "Orca-2-13B", "size": "7.4GB", "description": "Microsoft reasoning"}
194
  ]
195
  },
196
  "high": { # 17-32GB
197
  "general": [
198
- {"name": "Mixtral-8x7B-Instruct-v0.1", "size": "26.9GB", "description": "Mixture of experts"},
199
- {"name": "Llama-2-70B-Chat", "size": "38.0GB", "description": "8-bit quantized"},
200
- {"name": "Yi-34B-Chat", "size": "19.5GB", "description": "01.AI's large model"}
 
201
  ],
202
  "code": [
203
- {"name": "CodeLlama-34B-Instruct", "size": "19.0GB", "description": "Large code specialist"},
204
- {"name": "DeepSeek-Coder-33B", "size": "18.5GB", "description": "DeepSeek's coder"},
205
- {"name": "WizardCoder-34B", "size": "19.2GB", "description": "Enterprise coding"}
206
  ],
207
  "reasoning": [
208
- {"name": "WizardMath-70B", "size": "38.5GB", "description": "8-bit quantized math"},
209
- {"name": "MetaMath-70B", "size": "38.0GB", "description": "8-bit math reasoning"}
210
  ]
211
  },
212
  "ultra_high": { # >32GB
213
  "general": [
214
- {"name": "Llama-2-70B", "size": "130GB", "description": "Full precision"},
215
- {"name": "Mixtral-8x22B", "size": "176GB", "description": "Latest mixture model"},
216
- {"name": "Qwen-72B", "size": "145GB", "description": "Alibaba's flagship"}
 
217
  ],
218
  "code": [
219
- {"name": "CodeLlama-34B", "size": "68GB", "description": "Full precision code"},
220
- {"name": "DeepSeek-Coder-33B", "size": "66GB", "description": "Full precision coding"}
221
  ],
222
  "reasoning": [
223
- {"name": "WizardMath-70B", "size": "130GB", "description": "Full precision math"},
224
- {"name": "Goat-70B", "size": "132GB", "description": "Arithmetic reasoning"}
225
  ]
226
  }
227
  }
228
 
229
  # Enhanced LLM recommendation with performance tiers
230
  def recommend_llm(ram_str) -> Tuple[str, str, str, Dict[str, List[Dict]]]:
231
  """Returns (recommendation, performance_tier, additional_info, detailed_models)"""
@@ -300,37 +360,33 @@ def get_os_info(os_name) -> Tuple[str, str]:
300
  else:
301
  return "💻", os_name
302
 
303
- # Performance visualization
304
- def create_performance_chart(df):
305
- """Create a performance distribution chart"""
306
- laptop_rams = df["Laptop RAM"].apply(extract_numeric_ram).dropna()
307
- mobile_rams = df["Mobile RAM"].apply(extract_numeric_ram).dropna()
308
-
309
- fig = go.Figure()
310
-
311
- fig.add_trace(go.Histogram(
312
- x=laptop_rams,
313
- name="Laptop RAM",
314
- opacity=0.7,
315
- nbinsx=10
316
- ))
317
-
318
- fig.add_trace(go.Histogram(
319
- x=mobile_rams,
320
- name="Mobile RAM",
321
- opacity=0.7,
322
- nbinsx=10
323
- ))
324
-
325
- fig.update_layout(
326
- title="RAM Distribution Across Devices",
327
- xaxis_title="RAM (GB)",
328
- yaxis_title="Number of Students",
329
- barmode='overlay',
330
- height=400
331
- )
332
 
333
- return fig
334
 
335
  # Enhanced model details display function
336
  def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int, show_quantization=True):
@@ -343,8 +399,18 @@ def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int, sh
343
  for category, model_list in models_dict.items():
344
  if model_list:
345
  with st.expander(f"📂 {category.replace('_', ' ').title()} Models"):
346
- for model in model_list[:6]: # Reduced to show quantization options
347
  st.markdown(f"**{model['name']}**")
348
  st.markdown(f"*{model['description']}*")
349
 
350
  if show_quantization:
@@ -354,32 +420,68 @@ def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int, sh
354
  with quant_cols[i]:
355
  quant_size = calculate_quantized_size(model['size'], quant_type)
356
  st.metric(
357
- label=quant_type,
358
  value=quant_size,
359
  help=quant_info['description']
360
  )
361
- else:
362
- st.markdown(f"**Original Size:** {model['size']}")
363
 
364
  st.markdown("---")
365
 
366
  # Demo data generator for when Excel files are not available
367
  def generate_demo_data():
368
  """Generate demo data for testing when Excel files are missing"""
369
  demo_data = {
370
  "Full Name": [
371
  "Demo Student 1", "Demo Student 2", "Demo Student 3", "Demo Student 4",
372
- "Demo Student 5", "Demo Student 6", "Demo Student 7", "Demo Student 8"
 
373
  ],
374
- "Laptop RAM": ["8GB", "16GB", "4GB", "32GB", "6GB", "12GB", "2GB", "24GB"],
375
- "Mobile RAM": ["4GB", "8GB", "3GB", "12GB", "6GB", "4GB", "2GB", "8GB"],
376
  "Laptop Operating System": [
377
  "Windows 11", "macOS Monterey", "Ubuntu 22.04", "Windows 10",
378
- "macOS Big Sur", "Fedora 36", "Windows 11", "macOS Ventura"
 
379
  ],
380
  "Mobile Operating System": [
381
  "Android 13", "iOS 16", "Android 12", "iOS 15",
382
- "Android 14", "iOS 17", "Android 11", "iOS 16"
 
383
  ]
384
  }
385
  return pd.DataFrame(demo_data)
@@ -388,10 +490,8 @@ def generate_demo_data():
388
  def prepare_user_options(df):
389
  """Safely prepare user options for selectbox, handling NaN values and mixed types"""
390
  try:
391
- # Get unique names and filter out NaN values
392
  unique_names = df["Full Name"].dropna().unique()
393
 
394
- # Convert to strings and filter out any remaining non-string values
395
  valid_names = []
396
  for name in unique_names:
397
  try:
@@ -401,7 +501,6 @@ def prepare_user_options(df):
401
  except:
402
  continue
403
 
404
- # Create options list with proper string concatenation
405
  options = ["Select a student..."] + sorted(valid_names)
406
  return options
407
  except Exception as e:
 
1
  #!/usr/bin/env python3
2
  """
3
+ Enhanced LLM Compatibility Advisor - Complete with Quantization & Advanced Features
4
  Author: Assistant
5
+ Description: Comprehensive device-based LLM recommendations with quantization, comparison, and download assistance
6
  Requirements: streamlit, pandas, plotly, openpyxl
7
  """
8
 
 
13
  import plotly.express as px
14
  import plotly.graph_objects as go
15
  from typing import Optional, Tuple, List, Dict
16
+ import json
17
 
18
  # ✅ MUST be the first Streamlit command
19
  st.set_page_config(
20
+ page_title="Enhanced LLM Compatibility Advisor",
21
  layout="wide",
22
  page_icon="🧠",
23
  initial_sidebar_state="expanded"
 
42
  except Exception as e:
43
  return None, f"Error loading '{path}': {str(e)}"
44
 
 
45
  if combined_df.empty:
46
  return None, "No data found in Excel files."
47
  else:
 
71
 
72
  return None
73
 
74
+ # Quantization options and size calculations
75
+ QUANTIZATION_FORMATS = {
76
+ "FP16": {"multiplier": 1.0, "description": "Full precision, best quality", "icon": "πŸ”₯"},
77
+ "8-bit": {"multiplier": 0.5, "description": "50% smaller, good quality", "icon": "⚑"},
78
+ "4-bit": {"multiplier": 0.25, "description": "75% smaller, acceptable quality", "icon": "πŸ’Ž"},
79
+ "2-bit": {"multiplier": 0.125, "description": "87.5% smaller, experimental", "icon": "πŸ§ͺ"}
80
  }
81
 
82
+ def calculate_quantized_size(base_size_str, quant_format):
83
+ """Calculate quantized model size"""
84
+ size_match = re.search(r'(\d+\.?\d*)', base_size_str)
85
+ if not size_match:
86
  return base_size_str
87
+
88
+ base_size = float(size_match.group(1))
89
+ unit = base_size_str.replace(size_match.group(1), "").strip()
90
+
91
+ multiplier = QUANTIZATION_FORMATS[quant_format]["multiplier"]
92
+ new_size = base_size * multiplier
93
+
94
+ return f"{new_size:.1f}{unit}"
95
 
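Illustrative usage sketch (not part of the committed file) showing how the new size helper is expected to behave; it assumes the QUANTIZATION_FORMATS keys defined above and, unlike the old helper, does not convert between MB and GB:

# Sketch only: expected behaviour of calculate_quantized_size as defined above.
print(calculate_quantized_size("4.1GB", "4-bit"))   # "1.0GB"  (4.1 * 0.25, one decimal)
print(calculate_quantized_size("637MB", "8-bit"))   # "318.5MB"
print(calculate_quantized_size("unknown", "FP16"))  # "unknown" (no numeric match, returned unchanged)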
96
+ # Enhanced LLM database with more models and metadata
97
  LLM_DATABASE = {
98
  "ultra_low": { # ≀2GB
99
  "general": [
100
+ {"name": "TinyLlama-1.1B-Chat", "size": "637MB", "description": "Compact chat model", "parameters": "1.1B", "context": "2K"},
101
+ {"name": "DistilBERT-base", "size": "268MB", "description": "Efficient BERT variant", "parameters": "66M", "context": "512"},
102
+ {"name": "all-MiniLM-L6-v2", "size": "91MB", "description": "Sentence embeddings", "parameters": "22M", "context": "256"},
103
+ {"name": "OpenELM-270M", "size": "540MB", "description": "Apple's efficient model", "parameters": "270M", "context": "2K"}
104
  ],
105
  "code": [
106
+ {"name": "CodeT5-small", "size": "242MB", "description": "Code generation", "parameters": "60M", "context": "512"},
107
+ {"name": "Replit-code-v1-3B", "size": "1.2GB", "description": "Code completion", "parameters": "3B", "context": "4K"}
108
  ]
109
  },
110
  "low": { # 3-4GB
111
  "general": [
112
+ {"name": "Phi-1.5", "size": "2.8GB", "description": "Microsoft's efficient model", "parameters": "1.3B", "context": "2K"},
113
+ {"name": "Gemma-2B", "size": "1.4GB", "description": "Google's compact model", "parameters": "2B", "context": "8K"},
114
+ {"name": "OpenLLaMA-3B", "size": "2.1GB", "description": "Open source LLaMA", "parameters": "3B", "context": "2K"},
115
+ {"name": "StableLM-3B", "size": "2.2GB", "description": "Stability AI model", "parameters": "3B", "context": "4K"}
116
  ],
117
  "code": [
118
+ {"name": "CodeGen-2B", "size": "1.8GB", "description": "Salesforce code model", "parameters": "2B", "context": "2K"},
119
+ {"name": "StarCoder-1B", "size": "1.1GB", "description": "BigCode project", "parameters": "1B", "context": "8K"}
120
  ],
121
  "chat": [
122
+ {"name": "Alpaca-3B", "size": "2.0GB", "description": "Stanford's instruction model", "parameters": "3B", "context": "2K"},
123
+ {"name": "Vicuna-3B", "size": "2.1GB", "description": "ChatGPT-style training", "parameters": "3B", "context": "2K"}
124
  ]
125
  },
126
  "moderate_low": { # 5-6GB
127
  "general": [
128
+ {"name": "Phi-2", "size": "5.2GB", "description": "Microsoft's 2.7B model", "parameters": "2.7B", "context": "2K"},
129
+ {"name": "Gemma-7B-it", "size": "4.2GB", "description": "Google instruction tuned", "parameters": "7B", "context": "8K"},
130
+ {"name": "Mistral-7B-v0.1", "size": "4.1GB", "description": "Mistral AI base model", "parameters": "7B", "context": "8K"},
131
+ {"name": "Llama-2-7B", "size": "4.0GB", "description": "Meta's foundation model", "parameters": "7B", "context": "4K"}
132
  ],
133
  "code": [
134
+ {"name": "CodeLlama-7B", "size": "3.8GB", "description": "Meta's code specialist", "parameters": "7B", "context": "16K"},
135
+ {"name": "StarCoder-7B", "size": "4.0GB", "description": "Code generation expert", "parameters": "7B", "context": "8K"}
136
  ],
137
  "chat": [
138
+ {"name": "Zephyr-7B-beta", "size": "4.2GB", "description": "HuggingFace chat model", "parameters": "7B", "context": "32K"},
139
+ {"name": "Neural-Chat-7B", "size": "4.1GB", "description": "Intel optimized", "parameters": "7B", "context": "32K"}
140
  ]
141
  },
142
  "moderate": { # 7-8GB
143
  "general": [
144
+ {"name": "Llama-2-7B-Chat", "size": "3.5GB", "description": "Meta's popular chat model", "parameters": "7B", "context": "4K"},
145
+ {"name": "Mistral-7B-Instruct-v0.2", "size": "4.1GB", "description": "Latest Mistral instruct", "parameters": "7B", "context": "32K"},
146
+ {"name": "Qwen-7B-Chat", "size": "4.0GB", "description": "Alibaba's multilingual", "parameters": "7B", "context": "32K"},
147
+ {"name": "Solar-10.7B-Instruct", "size": "5.8GB", "description": "Upstage's efficient model", "parameters": "10.7B", "context": "4K"}
148
  ],
149
  "code": [
150
+ {"name": "CodeLlama-7B-Instruct", "size": "3.8GB", "description": "Instruction-tuned CodeLlama", "parameters": "7B", "context": "16K"},
151
+ {"name": "WizardCoder-7B", "size": "4.0GB", "description": "Enhanced coding abilities", "parameters": "7B", "context": "16K"},
152
+ {"name": "Phind-CodeLlama-34B-v2", "size": "4.2GB", "description": "4-bit quantized version", "parameters": "34B", "context": "16K"}
153
  ],
154
  "reasoning": [
155
+ {"name": "WizardMath-7B", "size": "4.0GB", "description": "Mathematical reasoning", "parameters": "7B", "context": "2K"},
156
+ {"name": "MetaMath-7B", "size": "3.9GB", "description": "Math problem solving", "parameters": "7B", "context": "2K"}
157
  ]
158
  },
159
  "good": { # 9-16GB
160
  "general": [
161
+ {"name": "Llama-2-13B-Chat", "size": "7.3GB", "description": "Larger Llama variant", "parameters": "13B", "context": "4K"},
162
+ {"name": "Vicuna-13B-v1.5", "size": "7.2GB", "description": "Enhanced Vicuna", "parameters": "13B", "context": "16K"},
163
+ {"name": "OpenChat-3.5", "size": "7.1GB", "description": "High-quality chat model", "parameters": "7B", "context": "8K"},
164
+ {"name": "Nous-Hermes-2-Mixtral-8x7B-DPO", "size": "12.9GB", "description": "4-bit quantized MoE", "parameters": "47B", "context": "32K"}
165
  ],
166
  "code": [
167
+ {"name": "CodeLlama-13B-Instruct", "size": "7.3GB", "description": "Larger code model", "parameters": "13B", "context": "16K"},
168
+ {"name": "WizardCoder-15B", "size": "8.2GB", "description": "Advanced coding", "parameters": "15B", "context": "16K"},
169
+ {"name": "StarCoder-15B", "size": "8.5GB", "description": "Large code model", "parameters": "15B", "context": "8K"}
170
  ],
171
  "multimodal": [
172
+ {"name": "LLaVA-7B", "size": "7.0GB", "description": "Vision + language", "parameters": "7B", "context": "2K"},
173
+ {"name": "MiniGPT-4-7B", "size": "6.8GB", "description": "Multimodal chat", "parameters": "7B", "context": "2K"},
174
+ {"name": "Instructblip-7B", "size": "7.2GB", "description": "Instruction-tuned VLM", "parameters": "7B", "context": "2K"}
175
  ],
176
  "reasoning": [
177
+ {"name": "WizardMath-13B", "size": "7.3GB", "description": "Advanced math", "parameters": "13B", "context": "2K"},
178
+ {"name": "Orca-2-13B", "size": "7.4GB", "description": "Microsoft reasoning", "parameters": "13B", "context": "4K"}
179
  ]
180
  },
181
  "high": { # 17-32GB
182
  "general": [
183
+ {"name": "Mixtral-8x7B-Instruct-v0.1", "size": "26.9GB", "description": "Mixture of experts", "parameters": "47B", "context": "32K"},
184
+ {"name": "Llama-2-70B-Chat", "size": "38.0GB", "description": "8-bit quantized", "parameters": "70B", "context": "4K"},
185
+ {"name": "Yi-34B-Chat", "size": "19.5GB", "description": "01.AI's large model", "parameters": "34B", "context": "200K"},
186
+ {"name": "Nous-Hermes-2-Yi-34B", "size": "19.2GB", "description": "Enhanced Yi variant", "parameters": "34B", "context": "200K"}
187
  ],
188
  "code": [
189
+ {"name": "CodeLlama-34B-Instruct", "size": "19.0GB", "description": "Large code specialist", "parameters": "34B", "context": "16K"},
190
+ {"name": "DeepSeek-Coder-33B", "size": "18.5GB", "description": "DeepSeek's coder", "parameters": "33B", "context": "16K"},
191
+ {"name": "WizardCoder-34B", "size": "19.2GB", "description": "Enterprise coding", "parameters": "34B", "context": "16K"}
192
  ],
193
  "reasoning": [
194
+ {"name": "WizardMath-70B", "size": "38.5GB", "description": "8-bit quantized math", "parameters": "70B", "context": "2K"},
195
+ {"name": "MetaMath-70B", "size": "38.0GB", "description": "8-bit math reasoning", "parameters": "70B", "context": "2K"}
196
  ]
197
  },
198
  "ultra_high": { # >32GB
199
  "general": [
200
+ {"name": "Llama-2-70B", "size": "130GB", "description": "Full precision", "parameters": "70B", "context": "4K"},
201
+ {"name": "Mixtral-8x22B", "size": "176GB", "description": "Latest mixture model", "parameters": "141B", "context": "64K"},
202
+ {"name": "Qwen-72B", "size": "145GB", "description": "Alibaba's flagship", "parameters": "72B", "context": "32K"},
203
+ {"name": "Llama-3-70B", "size": "140GB", "description": "Meta's latest", "parameters": "70B", "context": "8K"}
204
  ],
205
  "code": [
206
+ {"name": "CodeLlama-34B", "size": "68GB", "description": "Full precision code", "parameters": "34B", "context": "16K"},
207
+ {"name": "DeepSeek-Coder-33B", "size": "66GB", "description": "Full precision coding", "parameters": "33B", "context": "16K"}
208
  ],
209
  "reasoning": [
210
+ {"name": "WizardMath-70B", "size": "130GB", "description": "Full precision math", "parameters": "70B", "context": "2K"},
211
+ {"name": "Goat-70B", "size": "132GB", "description": "Arithmetic reasoning", "parameters": "70B", "context": "2K"}
212
  ]
213
  }
214
  }
215
 
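Illustrative lookup sketch (not part of the committed file); it assumes the LLM_DATABASE structure above, where the tier keys mirror the RAM brackets noted in the comments (e.g. "moderate" covers 7-8GB):

# Sketch only: list the coding models suggested for an 8GB machine.
for model in LLM_DATABASE["moderate"]["code"]:
    print(f"{model['name']}: {model['size']} "
          f"({model['parameters']} params, {model['context']} context)")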
216
+ # GPU compatibility database
217
+ GPU_DATABASE = {
218
+ "RTX 3060": {"vram": 8, "performance": "mid", "architecture": "Ampere"},
219
+ "RTX 3070": {"vram": 8, "performance": "high", "architecture": "Ampere"},
220
+ "RTX 3080": {"vram": 10, "performance": "high", "architecture": "Ampere"},
221
+ "RTX 3090": {"vram": 24, "performance": "ultra", "architecture": "Ampere"},
222
+ "RTX 4060": {"vram": 8, "performance": "mid", "architecture": "Ada Lovelace"},
223
+ "RTX 4070": {"vram": 12, "performance": "high", "architecture": "Ada Lovelace"},
224
+ "RTX 4080": {"vram": 16, "performance": "ultra", "architecture": "Ada Lovelace"},
225
+ "RTX 4090": {"vram": 24, "performance": "ultra", "architecture": "Ada Lovelace"},
226
+ "Apple M1": {"vram": 8, "performance": "mid", "architecture": "Apple Silicon"},
227
+ "Apple M2": {"vram": 16, "performance": "high", "architecture": "Apple Silicon"},
228
+ "Apple M3": {"vram": 24, "performance": "ultra", "architecture": "Apple Silicon"},
229
+ "RX 6700 XT": {"vram": 12, "performance": "mid", "architecture": "RDNA 2"},
230
+ "RX 7900 XTX": {"vram": 24, "performance": "ultra", "architecture": "RDNA 3"},
231
+ }
232
+
233
+ def get_gpu_recommendations(gpu_name, ram_gb):
234
+ """Get GPU-specific model recommendations"""
235
+ if gpu_name == "No GPU":
236
+ return "CPU-only models recommended", "Use 4-bit quantization for better performance"
237
+
238
+ gpu_info = GPU_DATABASE.get(gpu_name.split(" (")[0], {"vram": 0, "performance": "low"})
239
+ vram = gpu_info["vram"]
240
+
241
+ if vram <= 8:
242
+ return f"7B models with 4-bit quantization", f"Estimated VRAM usage: ~{vram-1}GB"
243
+ elif vram <= 12:
244
+ return f"13B models with 8-bit quantization", f"Estimated VRAM usage: ~{vram-1}GB"
245
+ elif vram <= 16:
246
+ return f"13B models at FP16 or 30B with 4-bit", f"Estimated VRAM usage: ~{vram-1}GB"
247
+ else:
248
+ return f"70B models with 4-bit quantization", f"Estimated VRAM usage: ~{vram-2}GB"
249
+
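Illustrative usage sketch (not part of the committed file) for the GPU lookup above; unknown GPU names fall back to {"vram": 0, "performance": "low"}, and the split on " (" appears intended to strip UI suffixes such as a hypothetical "RTX 4070 (12GB)" label:

# Sketch only: expected output for a known GPU entry.
models, vram_note = get_gpu_recommendations("RTX 4070", ram_gb=32)
print(models)     # "13B models with 8-bit quantization"
print(vram_note)  # "Estimated VRAM usage: ~11GB"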
250
+ def predict_inference_speed(model_size_gb, ram_gb, has_gpu=False, gpu_name=""):
251
+ """Predict approximate inference speed"""
252
+ if model_size_gb > ram_gb:
253
+ return "❌ Insufficient RAM", "Consider smaller model or quantization"
254
+
255
+ if has_gpu and gpu_name != "No GPU":
256
+ gpu_info = GPU_DATABASE.get(gpu_name.split(" (")[0], {"performance": "low"})
257
+ perf = gpu_info["performance"]
258
+
259
+ if perf == "ultra":
260
+ if model_size_gb <= 4:
261
+ return "⚡ Blazing Fast", "~50-100 tokens/sec"
262
+ elif model_size_gb <= 8:
263
+ return "🚀 Very Fast", "~30-60 tokens/sec"
264
+ elif model_size_gb <= 16:
265
+ return "🏃 Fast", "~15-30 tokens/sec"
266
+ else:
267
+ return "🐌 Moderate", "~5-15 tokens/sec"
268
+ elif perf == "high":
269
+ if model_size_gb <= 4:
270
+ return "⚡ Very Fast", "~30-50 tokens/sec"
271
+ elif model_size_gb <= 8:
272
+ return "🚀 Fast", "~15-30 tokens/sec"
273
+ else:
274
+ return "🐌 Moderate", "~5-15 tokens/sec"
275
+ else: # mid performance
276
+ if model_size_gb <= 4:
277
+ return "⚡ Fast", "~15-30 tokens/sec"
278
+ else:
279
+ return "🐌 Slow", "~3-10 tokens/sec"
280
+ else:
281
+ # CPU inference
282
+ if model_size_gb <= 2:
283
+ return "⚡ Acceptable", "~5-15 tokens/sec"
284
+ elif model_size_gb <= 4:
285
+ return "🐌 Slow", "~1-5 tokens/sec"
286
+ else:
287
+ return "🐌 Very Slow", "~0.5-2 tokens/sec"
288
+
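Illustrative sketch (not part of the committed file) of the speed heuristic above, assuming the GPU_DATABASE entries defined in this file:

# Sketch only: rough labels returned for a few inputs.
print(predict_inference_speed(4.1, ram_gb=16, has_gpu=True, gpu_name="RTX 4090"))
# ("🚀 Very Fast", "~30-60 tokens/sec")
print(predict_inference_speed(4.1, ram_gb=16))
# ("🐌 Very Slow", "~0.5-2 tokens/sec") - CPU-only path
print(predict_inference_speed(20.0, ram_gb=16))
# ("❌ Insufficient RAM", "Consider smaller model or quantization")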
289
  # Enhanced LLM recommendation with performance tiers
290
  def recommend_llm(ram_str) -> Tuple[str, str, str, Dict[str, List[Dict]]]:
291
  """Returns (recommendation, performance_tier, additional_info, detailed_models)"""
 
360
  else:
361
  return "💻", os_name
362
 
363
+ # Model comparison function
364
+ def create_model_comparison_table(selected_models, quantization_type="FP16"):
365
+ """Create a comparison table for selected models"""
366
+ comparison_data = []
367
 
368
+ for model_info in selected_models:
369
+ quant_size = calculate_quantized_size(model_info['size'], quantization_type)
370
+
371
+ # Extract numeric size for VRAM calculation
372
+ size_match = re.search(r'(\d+\.?\d*)', quant_size)
373
+ if size_match:
374
+ size_num = float(size_match.group(1))
375
+ estimated_vram = f"{size_num * 1.2:.1f}GB"
376
+ else:
377
+ estimated_vram = "Unknown"
378
+
379
+ comparison_data.append({
380
+ 'Model': model_info['name'],
381
+ 'Parameters': model_info.get('parameters', 'Unknown'),
382
+ 'Context': model_info.get('context', 'Unknown'),
383
+ 'Original Size': model_info['size'],
384
+ f'{quantization_type} Size': quant_size,
385
+ 'Est. VRAM': estimated_vram,
386
+ 'Description': model_info['description']
387
+ })
388
+
389
+ return pd.DataFrame(comparison_data)
390
 
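Illustrative sketch (not part of the committed file): building a comparison table from two LLM_DATABASE entries; the quantized-size column follows the f'{quantization_type} Size' naming used above, and the result is a DataFrame suitable for st.dataframe():

# Sketch only: compare two "moderate" tier general models at 4-bit.
picks = LLM_DATABASE["moderate"]["general"][:2]
table = create_model_comparison_table(picks, quantization_type="4-bit")
print(table[["Model", "4-bit Size", "Est. VRAM"]])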
391
  # Enhanced model details display function
392
  def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int, show_quantization=True):
 
399
  for category, model_list in models_dict.items():
400
  if model_list:
401
  with st.expander(f"📂 {category.replace('_', ' ').title()} Models"):
402
+ for model in model_list[:6]: # Show top 6 models per category
403
  st.markdown(f"**{model['name']}**")
404
+
405
+ # Model details
406
+ detail_col1, detail_col2, detail_col3 = st.columns(3)
407
+ with detail_col1:
408
+ st.caption(f"📊 {model.get('parameters', 'Unknown')} params")
409
+ with detail_col2:
410
+ st.caption(f"🔍 {model.get('context', 'Unknown')} context")
411
+ with detail_col3:
412
+ st.caption(f"💾 {model['size']} original")
413
+
414
  st.markdown(f"*{model['description']}*")
415
 
416
  if show_quantization:
 
420
  with quant_cols[i]:
421
  quant_size = calculate_quantized_size(model['size'], quant_type)
422
  st.metric(
423
+ label=f"{quant_info['icon']} {quant_type}",
424
  value=quant_size,
425
  help=quant_info['description']
426
  )
 
 
427
 
428
  st.markdown("---")
429
 
430
+ # Performance visualization
431
+ def create_performance_chart(df):
432
+ """Create a performance distribution chart"""
433
+ laptop_rams = df["Laptop RAM"].apply(extract_numeric_ram).dropna()
434
+ mobile_rams = df["Mobile RAM"].apply(extract_numeric_ram).dropna()
435
+
436
+ fig = go.Figure()
437
+
438
+ fig.add_trace(go.Histogram(
439
+ x=laptop_rams,
440
+ name="Laptop RAM",
441
+ opacity=0.7,
442
+ nbinsx=10,
443
+ marker_color='#1f77b4'
444
+ ))
445
+
446
+ fig.add_trace(go.Histogram(
447
+ x=mobile_rams,
448
+ name="Mobile RAM",
449
+ opacity=0.7,
450
+ nbinsx=10,
451
+ marker_color='#ff7f0e'
452
+ ))
453
+
454
+ fig.update_layout(
455
+ title="RAM Distribution Across Devices",
456
+ xaxis_title="RAM (GB)",
457
+ yaxis_title="Number of Students",
458
+ barmode='overlay',
459
+ height=400,
460
+ showlegend=True
461
+ )
462
+
463
+ return fig
464
+
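Illustrative sketch (not part of the committed file): rendering the histogram inside the app, assuming a running Streamlit context and the generate_demo_data() helper defined just below:

# Sketch only: plot the RAM distribution for the demo dataset.
df = generate_demo_data()
st.plotly_chart(create_performance_chart(df), use_container_width=True)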
465
  # Demo data generator for when Excel files are not available
466
  def generate_demo_data():
467
  """Generate demo data for testing when Excel files are missing"""
468
  demo_data = {
469
  "Full Name": [
470
  "Demo Student 1", "Demo Student 2", "Demo Student 3", "Demo Student 4",
471
+ "Demo Student 5", "Demo Student 6", "Demo Student 7", "Demo Student 8",
472
+ "Demo Student 9", "Demo Student 10", "Demo Student 11", "Demo Student 12"
473
  ],
474
+ "Laptop RAM": ["8GB", "16GB", "4GB", "32GB", "6GB", "12GB", "2GB", "24GB", "64GB", "3GB", "20GB", "10GB"],
475
+ "Mobile RAM": ["4GB", "8GB", "3GB", "12GB", "6GB", "4GB", "2GB", "8GB", "16GB", "3GB", "6GB", "8GB"],
476
  "Laptop Operating System": [
477
  "Windows 11", "macOS Monterey", "Ubuntu 22.04", "Windows 10",
478
+ "macOS Big Sur", "Fedora 36", "Windows 11", "macOS Ventura",
479
+ "Ubuntu 20.04", "Windows 10", "macOS Sonoma", "Pop!_OS 22.04"
480
  ],
481
  "Mobile Operating System": [
482
  "Android 13", "iOS 16", "Android 12", "iOS 15",
483
+ "Android 14", "iOS 17", "Android 11", "iOS 16",
484
+ "Android 13", "iOS 15", "Android 14", "iOS 17"
485
  ]
486
  }
487
  return pd.DataFrame(demo_data)
 
490
  def prepare_user_options(df):
491
  """Safely prepare user options for selectbox, handling NaN values and mixed types"""
492
  try:
 
493
  unique_names = df["Full Name"].dropna().unique()
494
 
 
495
  valid_names = []
496
  for name in unique_names:
497
  try:
 
501
  except:
502
  continue
503
 
 
504
  options = ["Select a student..."] + sorted(valid_names)
505
  return options
506
  except Exception as e: