Update src/streamlit_app.py

src/streamlit_app.py: CHANGED (+589, -590)
@@ -1,34 +1,33 @@
#!/usr/bin/env python3
"""
LLM Compatibility Advisor - Streamlined with Download Sizes
Author: Assistant
Description: Provides device-based LLM recommendations with popular models and download sizes
Requirements: streamlit, pandas, plotly, openpyxl
"""

import streamlit as st
import pandas as pd
import re
import plotly.express as px
import plotly.graph_objects as go
from typing import Optional, Tuple, List, Dict

# ✅ MUST be the first Streamlit command
st.set_page_config(
    page_title="LLM Compatibility Advisor",
    layout="wide",
    page_icon="🧠",
    initial_sidebar_state="expanded"
)

# Enhanced data loading with error handling
@st.cache_data
def load_data():
    try:
        # Use actual file paths
        df1 = pd.read_excel("src/BITS_INTERNS.xlsx", sheet_name="Form Responses 1")
        df2 = pd.read_excel("/mnt/data/Summer of AI - ICFAI (Responses) (3).xlsx")

        df1.columns = df1.columns.str.strip()
        df2.columns = df2.columns.str.strip()

@@ -46,581 +45,581 @@ def load_data():
        return None, f"Error loading data: {str(e)}"

# Enhanced RAM extraction with better parsing
def extract_numeric_ram(ram) -> Optional[int]:
    if pd.isna(ram):
        return None

    ram_str = str(ram).lower().replace(" ", "")

    # Handle various formats: "8GB", "8 GB", "8gb", "8192MB", etc.
    gb_match = re.search(r"(\d+(?:\.\d+)?)(?:gb|g)", ram_str)
    if gb_match:
        return int(float(gb_match.group(1)))

    # Handle MB format
    mb_match = re.search(r"(\d+)(?:mb|m)", ram_str)
    if mb_match:
        return max(1, int(int(mb_match.group(1)) / 1024))  # Convert MB to GB

    # Handle plain numbers (assume GB)
    plain_match = re.search(r"(\d+)", ram_str)
    if plain_match:
        return int(plain_match.group(1))

    return None
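
# Illustrative behaviour of the parser above, worked out from the regexes
# (not part of the commit):
#   extract_numeric_ram("8GB")    -> 8
#   extract_numeric_ram("8192MB") -> 8     (MB values are converted to GB)
#   extract_numeric_ram("16")     -> 16    (bare numbers are assumed to be GB)
#   extract_numeric_ram(None)     -> None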

# Streamlined LLM database with popular models and download sizes
LLM_DATABASE = {
    "ultra_low": {  # ≤2GB
        "general": [
            {"name": "TinyLlama-1.1B-Chat", "size": "637MB", "description": "Compact chat model"},
            {"name": "DistilBERT-base", "size": "268MB", "description": "Efficient BERT variant"},
            {"name": "all-MiniLM-L6-v2", "size": "91MB", "description": "Sentence embeddings"}
        ],
        "code": [
            {"name": "CodeT5-small", "size": "242MB", "description": "Code generation"},
            {"name": "Replit-code-v1-3B", "size": "1.2GB", "description": "Code completion"}
        ]
    },
    "low": {  # 3-4GB
        "general": [
            {"name": "Phi-1.5", "size": "2.8GB", "description": "Microsoft's efficient model"},
            {"name": "Gemma-2B", "size": "1.4GB", "description": "Google's compact model"},
            {"name": "OpenLLaMA-3B", "size": "2.1GB", "description": "Open source LLaMA"}
        ],
        "code": [
            {"name": "CodeGen-2B", "size": "1.8GB", "description": "Salesforce code model"},
            {"name": "StarCoder-1B", "size": "1.1GB", "description": "BigCode project"}
        ],
        "chat": [
            {"name": "Alpaca-3B", "size": "2.0GB", "description": "Stanford's instruction model"},
            {"name": "Vicuna-3B", "size": "2.1GB", "description": "ChatGPT-style training"}
        ]
    },
    "moderate_low": {  # 5-6GB
        "general": [
            {"name": "Phi-2", "size": "5.2GB", "description": "Microsoft's 2.7B model"},
            {"name": "Gemma-7B-it", "size": "4.2GB", "description": "Google instruction tuned"},
            {"name": "Mistral-7B-v0.1", "size": "4.1GB", "description": "Mistral AI base model"}
        ],
        "code": [
            {"name": "CodeLlama-7B", "size": "3.8GB", "description": "Meta's code specialist"},
            {"name": "StarCoder-7B", "size": "4.0GB", "description": "Code generation expert"}
        ],
        "chat": [
            {"name": "Zephyr-7B-beta", "size": "4.2GB", "description": "HuggingFace chat model"},
            {"name": "Neural-Chat-7B", "size": "4.1GB", "description": "Intel optimized"}
        ]
    },
    "moderate": {  # 7-8GB
        "general": [
            {"name": "Llama-2-7B-Chat", "size": "3.5GB", "description": "Meta's popular chat model"},
            {"name": "Mistral-7B-Instruct-v0.2", "size": "4.1GB", "description": "Latest Mistral instruct"},
            {"name": "Qwen-7B-Chat", "size": "4.0GB", "description": "Alibaba's multilingual"}
        ],
        "code": [
            {"name": "CodeLlama-7B-Instruct", "size": "3.8GB", "description": "Instruction-tuned CodeLlama"},
            {"name": "WizardCoder-7B", "size": "4.0GB", "description": "Enhanced coding abilities"},
            {"name": "Phind-CodeLlama-34B-v2", "size": "4.2GB", "description": "4-bit quantized version"}
        ],
        "reasoning": [
            {"name": "WizardMath-7B", "size": "4.0GB", "description": "Mathematical reasoning"},
            {"name": "MetaMath-7B", "size": "3.9GB", "description": "Math problem solving"}
        ]
    },
    "good": {  # 9-16GB
        "general": [
            {"name": "Llama-2-13B-Chat", "size": "7.3GB", "description": "Larger Llama variant"},
            {"name": "Vicuna-13B-v1.5", "size": "7.2GB", "description": "Enhanced Vicuna"},
            {"name": "OpenChat-3.5", "size": "7.1GB", "description": "High-quality chat model"}
        ],
        "code": [
            {"name": "CodeLlama-13B-Instruct", "size": "7.3GB", "description": "Larger code model"},
            {"name": "WizardCoder-15B", "size": "8.2GB", "description": "Advanced coding"},
            {"name": "StarCoder-15B", "size": "8.5GB", "description": "Large code model"}
        ],
        "multimodal": [
            {"name": "LLaVA-7B", "size": "7.0GB", "description": "Vision + language"},
            {"name": "MiniGPT-4-7B", "size": "6.8GB", "description": "Multimodal chat"}
        ],
        "reasoning": [
            {"name": "WizardMath-13B", "size": "7.3GB", "description": "Advanced math"},
            {"name": "Orca-2-13B", "size": "7.4GB", "description": "Microsoft reasoning"}
        ]
    },
    "high": {  # 17-32GB
        "general": [
            {"name": "Mixtral-8x7B-Instruct-v0.1", "size": "26.9GB", "description": "Mixture of experts"},
            {"name": "Llama-2-70B-Chat", "size": "38.0GB", "description": "8-bit quantized"},
            {"name": "Yi-34B-Chat", "size": "19.5GB", "description": "01.AI's large model"}
        ],
        "code": [
            {"name": "CodeLlama-34B-Instruct", "size": "19.0GB", "description": "Large code specialist"},
            {"name": "DeepSeek-Coder-33B", "size": "18.5GB", "description": "DeepSeek's coder"},
            {"name": "WizardCoder-34B", "size": "19.2GB", "description": "Enterprise coding"}
        ],
        "reasoning": [
            {"name": "WizardMath-70B", "size": "38.5GB", "description": "8-bit quantized math"},
            {"name": "MetaMath-70B", "size": "38.0GB", "description": "8-bit math reasoning"}
        ]
    },
    "ultra_high": {  # >32GB
        "general": [
            {"name": "Llama-2-70B", "size": "130GB", "description": "Full precision"},
            {"name": "Mixtral-8x22B", "size": "176GB", "description": "Latest mixture model"},
            {"name": "Qwen-72B", "size": "145GB", "description": "Alibaba's flagship"}
        ],
        "code": [
            {"name": "CodeLlama-34B", "size": "68GB", "description": "Full precision code"},
            {"name": "DeepSeek-Coder-33B", "size": "66GB", "description": "Full precision coding"}
        ],
        "reasoning": [
            {"name": "WizardMath-70B", "size": "130GB", "description": "Full precision math"},
            {"name": "Goat-70B", "size": "132GB", "description": "Arithmetic reasoning"}
        ]
    }
}
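
# Example lookup against the database above (illustrative):
#   LLM_DATABASE["moderate"]["code"][0]["name"]  ->  "CodeLlama-7B-Instruct"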

# Enhanced LLM recommendation with performance tiers
def recommend_llm(ram_str) -> Tuple[str, str, str, Dict[str, List[Dict]]]:
    """Returns (recommendation, performance_tier, additional_info, detailed_models)"""
    ram = extract_numeric_ram(ram_str)

    if ram is None:
        return ("⚪ Check exact specs or test with quantized models.",
                "Unknown",
                "Verify RAM specifications",
                {})

    if ram <= 2:
        models = LLM_DATABASE["ultra_low"]
        return ("🔸 Ultra-lightweight models - basic NLP tasks",
                "Ultra Low",
                "Mobile-optimized, simple tasks, limited context",
                models)
    elif ram <= 4:
        models = LLM_DATABASE["low"]
        return ("🔸 Small language models - decent capabilities",
                "Low",
                "Basic chat, simple reasoning, text classification",
                models)
    elif ram <= 6:
        models = LLM_DATABASE["moderate_low"]
        return ("🟠 Mid-range models - good general performance",
                "Moderate-Low",
                "Solid reasoning, coding help, longer conversations",
                models)
    elif ram <= 8:
        models = LLM_DATABASE["moderate"]
        return ("🟠 Strong 7B models - excellent capabilities",
                "Moderate",
                "Professional use, coding assistance, complex reasoning",
                models)
    elif ram <= 16:
        models = LLM_DATABASE["good"]
        return ("🟢 High-quality models - premium performance",
                "Good",
                "Advanced tasks, multimodal support, research use",
                models)
    elif ram <= 32:
        models = LLM_DATABASE["high"]
        return ("🔵 Premium models - professional grade",
                "High",
                "Enterprise ready, complex reasoning, specialized tasks",
                models)
    else:
        models = LLM_DATABASE["ultra_high"]
        return ("🔵 Top-tier models - enterprise capabilities",
                "Ultra High",
                "Research grade, maximum performance, domain expertise",
                models)
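
# Illustrative call, following the branches above: recommend_llm("16GB")
# falls into the `ram <= 16` branch and returns the "Good" tier together
# with the LLM_DATABASE["good"] model listing.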

# Enhanced OS detection with better icons
def get_os_info(os_name) -> Tuple[str, str]:
    """Returns (icon, clean_name)"""
    if pd.isna(os_name):
        return "💻", "Not specified"

    os = str(os_name).lower()
    if "windows" in os:
        return "🪟", os_name
    elif "mac" in os or "darwin" in os:
        return "🍎", os_name
    elif "linux" in os or "ubuntu" in os:
        return "🐧", os_name
    elif "android" in os:
        return "🤖", os_name
    elif "ios" in os:
        return "📱", os_name
    else:
        return "💻", os_name

# Performance visualization
def create_performance_chart(df):
    """Create a performance distribution chart"""
    laptop_rams = df["Laptop RAM"].apply(extract_numeric_ram).dropna()
    mobile_rams = df["Mobile RAM"].apply(extract_numeric_ram).dropna()

    fig = go.Figure()

    fig.add_trace(go.Histogram(
        x=laptop_rams,
        name="Laptop RAM",
        opacity=0.7,
        nbinsx=10
    ))

    fig.add_trace(go.Histogram(
        x=mobile_rams,
        name="Mobile RAM",
        opacity=0.7,
        nbinsx=10
    ))

    fig.update_layout(
        title="RAM Distribution Across Devices",
        xaxis_title="RAM (GB)",
        yaxis_title="Number of Students",
        barmode='overlay',
        height=400
    )

    return fig
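
# The two traces share one axis; barmode='overlay' with 0.7 opacity keeps both
# histograms visible so laptop and mobile RAM distributions can be compared
# directly. The figure is rendered in the batch-analysis section below via
# st.plotly_chart(fig, use_container_width=True).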

# Enhanced model details display function
def display_model_categories(models_dict: Dict[str, List[Dict]], ram_gb: int):
    """Display models organized by category with download sizes"""
    if not models_dict:
        return

    st.markdown(f"### 🎯 Recommended Models for {ram_gb}GB RAM:")

    for category, model_list in models_dict.items():
        if model_list:
            with st.expander(f"📁 {category.replace('_', ' ').title()} Models"):
                for model in model_list[:8]:  # Limit to top 8 per category
                    col1, col2, col3 = st.columns([3, 1, 2])
                    with col1:
                        st.markdown(f"**{model['name']}**")
                    with col2:
                        st.markdown(f"`{model['size']}`")
                    with col3:
                        st.markdown(f"*{model['description']}*")

# Main App
st.title("🧠 LLM Compatibility Advisor")
st.markdown("Get personalized recommendations from **150+ popular open source AI models** with download sizes!")

# Load data
df, error = load_data()

if error:
    st.error(error)
    st.info("Please ensure the Excel file 'BITS_INTERNS.xlsx' is in the same directory as this script.")
    st.stop()

if df is None or df.empty:
    st.error("No data found in the Excel file.")
    st.stop()

# Sidebar filters and info
with st.sidebar:
    st.header("🔍 Filters & Info")

    # Performance tier filter
    performance_filter = st.multiselect(
        "Filter by Performance Tier:",
        ["Ultra Low", "Low", "Moderate-Low", "Moderate", "Good", "High", "Ultra High", "Unknown"],
        default=["Ultra Low", "Low", "Moderate-Low", "Moderate", "Good", "High", "Ultra High", "Unknown"]
    )

    # Model category filter
    st.subheader("Model Categories")
    show_categories = st.multiselect(
        "Show specific categories:",
        ["general", "code", "chat", "reasoning", "multimodal"],
        default=["general", "code", "chat"]
    )

    st.markdown("---")
    st.markdown("### 📊 Quick Stats")
    st.metric("Total Students", len(df))
    st.metric("Popular Models", "150+")

    # Calculate average RAM
    avg_laptop_ram = df["Laptop RAM"].apply(extract_numeric_ram).mean()
    avg_mobile_ram = df["Mobile RAM"].apply(extract_numeric_ram).mean()

    if not pd.isna(avg_laptop_ram):
        st.metric("Avg Laptop RAM", f"{avg_laptop_ram:.1f} GB")
    if not pd.isna(avg_mobile_ram):
        st.metric("Avg Mobile RAM", f"{avg_mobile_ram:.1f} GB")

# User selection with search
st.subheader("👤 Individual Student Analysis")
selected_user = st.selectbox(
    "Choose a student:",
    options=[""] + list(df["Full Name"].unique()),
    format_func=lambda x: "Select a student..." if x == "" else x
)

if selected_user:
    user_data = df[df["Full Name"] == selected_user].iloc[0]

    # Enhanced user display
    col1, col2 = st.columns(2)

    with col1:
        st.markdown("### 💻 Laptop Configuration")
        laptop_os_icon, laptop_os_name = get_os_info(user_data.get('Laptop Operating System'))
        laptop_ram = user_data.get('Laptop RAM', 'Not specified')
        laptop_rec, laptop_tier, laptop_info, laptop_models = recommend_llm(laptop_ram)
        laptop_ram_gb = extract_numeric_ram(laptop_ram) or 0

        st.markdown(f"**OS:** {laptop_os_icon} {laptop_os_name}")
        st.markdown(f"**RAM:** {laptop_ram}")
        st.markdown(f"**Performance Tier:** {laptop_tier}")

        st.success(f"**💡 Recommendation:** {laptop_rec}")
        st.info(f"**ℹ️ Notes:** {laptop_info}")

        # Display detailed models for laptop
        if laptop_models:
            filtered_models = {k: v for k, v in laptop_models.items() if k in show_categories}
            display_model_categories(filtered_models, laptop_ram_gb)

    with col2:
        st.markdown("### 📱 Mobile Configuration")
        mobile_os_icon, mobile_os_name = get_os_info(user_data.get('Mobile Operating System'))
        mobile_ram = user_data.get('Mobile RAM', 'Not specified')
        mobile_rec, mobile_tier, mobile_info, mobile_models = recommend_llm(mobile_ram)
        mobile_ram_gb = extract_numeric_ram(mobile_ram) or 0

        st.markdown(f"**OS:** {mobile_os_icon} {mobile_os_name}")
        st.markdown(f"**RAM:** {mobile_ram}")
        st.markdown(f"**Performance Tier:** {mobile_tier}")

        st.success(f"**💡 Recommendation:** {mobile_rec}")
        st.info(f"**ℹ️ Notes:** {mobile_info}")

        # Display detailed models for mobile
        if mobile_models:
            filtered_models = {k: v for k, v in mobile_models.items() if k in show_categories}
            display_model_categories(filtered_models, mobile_ram_gb)

# Batch Analysis Section
st.markdown("---")
st.header("📊 Batch Analysis & Insights")

# Create enhanced batch table
df_display = df[["Full Name", "Laptop RAM", "Mobile RAM"]].copy()

# Add recommendations and performance tiers
laptop_recommendations = df["Laptop RAM"].apply(lambda x: recommend_llm(x)[0])
mobile_recommendations = df["Mobile RAM"].apply(lambda x: recommend_llm(x)[0])
laptop_tiers = df["Laptop RAM"].apply(lambda x: recommend_llm(x)[1])
mobile_tiers = df["Mobile RAM"].apply(lambda x: recommend_llm(x)[1])

df_display["Laptop LLM"] = laptop_recommendations
df_display["Mobile LLM"] = mobile_recommendations
df_display["Laptop Tier"] = laptop_tiers
df_display["Mobile Tier"] = mobile_tiers

# Filter based on sidebar selections (RAM range filter removed)
mask = (laptop_tiers.isin(performance_filter) | mobile_tiers.isin(performance_filter))

df_filtered = df_display[mask]

# Display filtered table
st.subheader(f"📋 Student Recommendations ({len(df_filtered)} students)")
st.dataframe(
    df_filtered,
    use_container_width=True,
    column_config={
        "Full Name": st.column_config.TextColumn("Student Name", width="medium"),
        "Laptop RAM": st.column_config.TextColumn("Laptop RAM", width="small"),
        "Mobile RAM": st.column_config.TextColumn("Mobile RAM", width="small"),
        "Laptop LLM": st.column_config.TextColumn("Laptop Recommendation", width="large"),
        "Mobile LLM": st.column_config.TextColumn("Mobile Recommendation", width="large"),
        "Laptop Tier": st.column_config.TextColumn("L-Tier", width="small"),
        "Mobile Tier": st.column_config.TextColumn("M-Tier", width="small"),
    }
)

# Performance distribution chart
if len(df) > 1:
    st.subheader("📈 RAM Distribution Analysis")
    fig = create_performance_chart(df)
    st.plotly_chart(fig, use_container_width=True)

# Performance tier summary
st.subheader("🎯 Performance Tier Summary")
tier_col1, tier_col2 = st.columns(2)

with tier_col1:
    st.markdown("**Laptop Performance Tiers:**")
    laptop_tier_counts = laptop_tiers.value_counts()
    for tier, count in laptop_tier_counts.items():
        percentage = (count / len(laptop_tiers)) * 100
        st.write(f"• {tier}: {count} students ({percentage:.1f}%)")

with tier_col2:
    st.markdown("**Mobile Performance Tiers:**")
    mobile_tier_counts = mobile_tiers.value_counts()
    for tier, count in mobile_tier_counts.items():
        # Divide by the number of students, not the number of distinct tiers
        percentage = (count / len(mobile_tiers)) * 100
        st.write(f"• {tier}: {count} students ({percentage:.1f}%)")

# Model Explorer Section
st.markdown("---")
st.header("🔍 Popular Model Explorer")

explorer_col1, explorer_col2 = st.columns(2)

with explorer_col1:
    selected_ram_range = st.selectbox(
        "Select RAM range to explore models:",
        ["≤2GB (Ultra Low)", "3-4GB (Low)", "5-6GB (Moderate-Low)",
         "7-8GB (Moderate)", "9-16GB (Good)", "17-32GB (High)", ">32GB (Ultra High)"]
    )

with explorer_col2:
    selected_category = st.selectbox(
        "Select model category:",
        ["general", "code", "chat", "reasoning", "multimodal"]
    )

# Map selection to database key
ram_mapping = {
    "≤2GB (Ultra Low)": "ultra_low",
    "3-4GB (Low)": "low",
    "5-6GB (Moderate-Low)": "moderate_low",
    "7-8GB (Moderate)": "moderate",
    "9-16GB (Good)": "good",
    "17-32GB (High)": "high",
    ">32GB (Ultra High)": "ultra_high"
}
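
# e.g. ram_mapping["7-8GB (Moderate)"] -> "moderate", which keys into LLM_DATABASE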

selected_ram_key = ram_mapping[selected_ram_range]
if selected_ram_key in LLM_DATABASE and selected_category in LLM_DATABASE[selected_ram_key]:
    models = LLM_DATABASE[selected_ram_key][selected_category]

    st.subheader(f"🎯 {selected_category.title()} Models for {selected_ram_range}")

    # Display models in a detailed table
    for model in models:
        with st.container():
            col1, col2, col3 = st.columns([3, 1, 3])
            with col1:
                st.markdown(f"### {model['name']}")
            with col2:
                st.markdown(f"**{model['size']}**")
                st.caption("Download Size")
            with col3:
                st.markdown(f"*{model['description']}*")
                # Add download suggestion
                if "Llama" in model['name']:
                    st.caption("🌐 Available on Hugging Face & Ollama")
                elif "Mistral" in model['name']:
                    st.caption("🌐 Available on Hugging Face & Mistral AI")
                elif "Gemma" in model['name']:
                    st.caption("🌐 Available on Hugging Face & Google")
                else:
                    st.caption("🌐 Available on Hugging Face")
            st.markdown("---")
else:
    st.info(f"No {selected_category} models available for {selected_ram_range}")

# Enhanced reference guide
with st.expander("📚 Model Guide & Download Information"):
    st.markdown("""
## 🌟 Popular Models by Category

### 🎯 **General Purpose Champions**
- **Llama-2 Series**: Meta's flagship models (7B, 13B, 70B)
- **Mistral Series**: Excellent efficiency and performance
- **Gemma**: Google's efficient models (2B, 7B)
- **Phi**: Microsoft's compact powerhouses

### 💻 **Code Specialists**
- **CodeLlama**: Meta's dedicated coding models
- **StarCoder**: BigCode's programming experts
- **WizardCoder**: Enhanced coding capabilities
- **DeepSeek-Coder**: DeepSeek's coding specialist

### 💬 **Chat Optimized**
- **Vicuna**: UC Berkeley's ChatGPT alternative
- **Zephyr**: HuggingFace's chat specialist
- **OpenChat**: High-quality conversation models
- **Neural-Chat**: Intel-optimized chat models

### 🧮 **Reasoning Masters**
- **WizardMath**: Mathematical problem solving
- **MetaMath**: Advanced arithmetic reasoning
- **Orca-2**: Microsoft's reasoning specialist
- **Goat**: Specialized arithmetic model

### 👁️ **Multimodal Models**
- **LLaVA**: Large Language and Vision Assistant
- **MiniGPT-4**: Multimodal conversational AI

## 💾 Download Size Reference

| Model Size | FP16 | 8-bit | 4-bit | Use Case |
|------------|------|-------|-------|----------|
| **1-3B** | 2-6GB | 1-3GB | 0.5-1.5GB | Mobile, Edge |
| **7B** | 13GB | 7GB | 3.5GB | Desktop, Laptop |
| **13B** | 26GB | 13GB | 7GB | Workstation |
| **30-34B** | 60GB | 30GB | 15GB | Server, Cloud |
| **70B** | 140GB | 70GB | 35GB | High-end Server |

## 🛠️ Where to Download

### **Primary Sources**
- **🤗 Hugging Face**: Largest repository with 400,000+ models
- **🦙 Ollama**: Simple CLI tool for local deployment
- **📦 LM Studio**: User-friendly GUI for model management

### **Quantized Formats**
- **GGUF**: Best for CPU inference (llama.cpp)
- **GPTQ**: GPU-optimized quantization
- **AWQ**: Advanced weight quantization

### **Download Tips**
- Use `git lfs` for large models from Hugging Face
- Consider bandwidth and storage before downloading
- Start with 4-bit quantized versions for testing
- Use `ollama pull model_name` for easiest setup

## 🔧 Optimization Strategies

### **Memory Reduction**
- **4-bit quantization**: 75% memory reduction
- **8-bit quantization**: 50% memory reduction
- **CPU offloading**: Use system RAM for overflow

### **Speed Optimization**
- **GPU acceleration**: CUDA, ROCm, Metal
- **Batch processing**: Process multiple requests
- **Context caching**: Reuse computations
""")

# Footer with updated resources
st.markdown("---")
st.markdown("""
### 🔗 Essential Download & Deployment Tools

**📦 Easy Model Deployment:**
- [**Ollama**](https://ollama.ai/) – `curl -fsSL https://ollama.ai/install.sh | sh`
- [**LM Studio**](https://lmstudio.ai/) – Drag-and-drop GUI for running models locally
- [**GPT4All**](https://gpt4all.io/) – Cross-platform desktop app for local LLMs

**🤗 Model Repositories:**
- [**Hugging Face Hub**](https://huggingface.co/models) – Filter by model size, task, and license
- [**TheBloke's Quantizations**](https://huggingface.co/TheBloke) – Pre-quantized models in GGUF/GPTQ format
- [**Awesome LLM**](https://github.com/Hannibal046/Awesome-LLMs) – Curated list of models and resources

---
""")
|