qwerty45-uiop committed on
Commit
1d3d953
·
verified ·
1 Parent(s): d42afee

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +32 -33
src/streamlit_app.py CHANGED
@@ -596,43 +596,42 @@ if selected_ram_key in LLM_DATABASE and selected_category in LLM_DATABASE[select
596
  st.subheader(f"🎯 {selected_category.title()} Models for {selected_ram_range}")
597
 
598
  # Display models in a detailed table
599
- # Inside the for loop displaying each model
600
- with st.container():
601
- col1, col2, col3 = st.columns([3, 1, 3])
602
- with col1:
603
- st.markdown(f"### {model['name']}")
604
- with col2:
605
- st.markdown(f"**{model['size']}**")
606
- st.caption("Download Size")
607
- with col3:
608
- st.markdown(f"*{model['description']}*")
609
- # Add download suggestion
610
- if "Llama" in model['name']:
611
- st.caption("🔗 Hugging Face & Ollama")
612
- elif "Mistral" in model['name']:
613
- st.caption("🔗 Hugging Face & Mistral AI")
614
- elif "Gemma" in model['name']:
615
- st.caption("🔗 Hugging Face & Google")
616
- else:
617
- st.caption("🔗 Hugging Face")
618
-
619
- # 🔽 Quantization size details
620
- fp16, q8, q4 = calculate_quantized_sizes(model['size'])
621
- with st.expander("💾 Quantized Size Estimates"):
622
- st.markdown(f"""
623
- | Format | Size |
624
- |--------|------|
625
- | FP16 (Full Precision) | **{fp16}** |
626
- | 8-bit Quantized | **{q8}** |
627
- | 4-bit Quantized | **{q4}** |
628
- """)
629
-
630
-
631
 
632
- st.markdown("---")
633
  else:
634
  st.info(f"No {selected_category} models available for {selected_ram_range}")
635
 
 
636
  # Enhanced reference guide
637
  with st.expander("📘 Model Guide & Download Information"):
638
  st.markdown("""
 
596
  st.subheader(f"🎯 {selected_category.title()} Models for {selected_ram_range}")
597
 
598
  # Display models in a detailed table
599
+ for model in models:
600
+ with st.container():
601
+ col1, col2, col3 = st.columns([3, 1, 3])
602
+ with col1:
603
+ st.markdown(f"### {model['name']}")
604
+ with col2:
605
+ st.markdown(f"**{model['size']}**")
606
+ st.caption("Download Size")
607
+ with col3:
608
+ st.markdown(f"*{model['description']}*")
609
+ if "Llama" in model['name']:
610
+ st.caption("🔗 Available on Hugging Face & Ollama")
611
+ elif "Mistral" in model['name']:
612
+ st.caption("🔗 Available on Hugging Face & Mistral AI")
613
+ elif "Gemma" in model['name']:
614
+ st.caption("🔗 Available on Hugging Face & Google")
615
+ else:
616
+ st.caption("🔗 Available on Hugging Face")
617
+
618
+ # 🔽 Quantization size details
619
+ fp16, q8, q4 = calculate_quantized_sizes(model['size'])
620
+ with st.expander("💾 Quantized Size Estimates"):
621
+ st.markdown(f"""
622
+ | Format | Size |
623
+ |--------|------|
624
+ | FP16 (Full Precision) | **{fp16}** |
625
+ | 8-bit Quantized | **{q8}** |
626
+ | 4-bit Quantized | **{q4}** |
627
+ """)
628
+
629
+ st.markdown("---") # ✅ this belongs inside the if block
 
630
 
 
631
  else:
632
  st.info(f"No {selected_category} models available for {selected_ram_range}")
633
 
634
+
635
  # Enhanced reference guide
636
  with st.expander("📘 Model Guide & Download Information"):
637
  st.markdown("""