qwerty45-uiop committed
Commit 851786a · verified · 1 Parent(s): 1384952

Update src/streamlit_app.py

Files changed (1):
  1. src/streamlit_app.py +71 -11
src/streamlit_app.py CHANGED
@@ -131,22 +131,82 @@ def calculate_quantized_size(base_size_str, quant_format):
 LLM_DATABASE = {
     "ultra_low": { # ≤2GB
         "general": [
-            {"name": "TinyLlama-1.1B-Chat", "size": "637MB", "description": "Compact chat model", "parameters": "1.1B", "context": "2K"},
-            {"name": "DistilBERT-base", "size": "268MB", "description": "Efficient BERT variant", "parameters": "66M", "context": "512"},
-            {"name": "all-MiniLM-L6-v2", "size": "91MB", "description": "Sentence embeddings", "parameters": "22M", "context": "256"},
-            {"name": "OpenELM-270M", "size": "540MB", "description": "Apple's efficient model", "parameters": "270M", "context": "2K"}
-        ],
+            {"name": "TinyLlama-1.1B-Chat", "size": "2.2GB", "description": "Ultra-compact conversational model"},
+            {"name": "DistilBERT-base", "size": "0.3GB", "description": "Efficient BERT variant for NLP tasks"},
+            {"name": "all-MiniLM-L6-v2", "size": "0.1GB", "description": "Sentence embeddings specialist"},
+            {"name": "OPT-125M", "size": "0.5GB", "description": "Meta's lightweight language model"},
+            {"name": "GPT-Neo-125M", "size": "0.5GB", "description": "EleutherAI's compact model"},
+            {"name": "DistilGPT-2", "size": "0.3GB", "description": "Distilled version of GPT-2"},
+            {"name": "MobileBERT", "size": "0.2GB", "description": "Google's mobile-optimized BERT"},
+            {"name": "ALBERT-base", "size": "0.4GB", "description": "A Lite BERT for self-supervised learning"},
+            {"name": "RoBERTa-base", "size": "0.5GB", "description": "Robustly optimized BERT pretraining"},
+            {"name": "ELECTRA-small", "size": "0.2GB", "description": "Efficiently learning encoder representations"},
+            {"name": "MobileLLaMA-1B", "size": "1.0GB", "description": "Mobile-optimized Llama variant"},
+            {"name": "GPT-2-small", "size": "0.5GB", "description": "OpenAI's original small model"},
+            {"name": "T5-small", "size": "0.2GB", "description": "Text-to-Text Transfer Transformer"},
+            {"name": "FLAN-T5-small", "size": "0.3GB", "description": "Instruction-tuned T5"},
+            {"name": "UL2-small", "size": "0.8GB", "description": "Unified Language Learner"},
+            {"name": "DeBERTa-v3-small", "size": "0.4GB", "description": "Microsoft's enhanced BERT"},
+            {"name": "CANINE-s", "size": "0.5GB", "description": "Character-level model"},
+            {"name": "Longformer-base", "size": "0.6GB", "description": "Long document understanding"},
+            {"name": "BigBird-small", "size": "0.7GB", "description": "Sparse attention model"},
+            {"name": "Reformer-small", "size": "0.3GB", "description": "Memory-efficient transformer"},
+            {"name": "FNet-small", "size": "0.4GB", "description": "Fourier transform model"},
+            {"name": "Synthesizer-small", "size": "0.3GB", "description": "Synthetic attention patterns"},
+            {"name": "GPT-Neo-1.3B", "size": "1.3GB", "description": "EleutherAI's 1.3B model"},
+            {"name": "OPT-350M", "size": "0.7GB", "description": "Meta's 350M parameter model"},
+            {"name": "BLOOM-560M", "size": "1.1GB", "description": "BigScience's small multilingual"}
+        ],
         "code": [
-            {"name": "CodeT5-small", "size": "242MB", "description": "Code generation", "parameters": "60M", "context": "512"},
-            {"name": "Replit-code-v1-3B", "size": "1.2GB", "description": "Code completion", "parameters": "3B", "context": "4K"}
+            {"name": "CodeT5-small", "size": "0.3GB", "description": "Compact code generation model"},
+            {"name": "Replit-code-v1-3B", "size": "1.2GB", "description": "Code completion specialist"},
+            {"name": "UnixCoder-base", "size": "0.5GB", "description": "Microsoft's code understanding model"},
+            {"name": "CodeBERT-base", "size": "0.5GB", "description": "Bimodal pre-trained model for programming"},
+            {"name": "GraphCodeBERT-base", "size": "0.5GB", "description": "Pre-trained model with data flow"},
+            {"name": "CodeT5-base", "size": "0.9GB", "description": "Identifier-aware unified pre-trained encoder-decoder"},
+            {"name": "PyCodeGPT-110M", "size": "0.4GB", "description": "Python code generation specialist"},
+            {"name": "CodeParrot-110M", "size": "0.4GB", "description": "GPT-2 model trained on Python code"},
+            {"name": "CodeSearchNet-small", "size": "0.6GB", "description": "Code search and understanding"},
+            {"name": "CuBERT-small", "size": "0.4GB", "description": "Google's code understanding"},
+            {"name": "CodeGPT-small", "size": "0.5GB", "description": "Microsoft's code GPT"},
+            {"name": "PLBART-small", "size": "0.7GB", "description": "Programming language BART"},
+            {"name": "TreeBERT-small", "size": "0.6GB", "description": "Tree-based code representation"},
+            {"name": "CoTexT-small", "size": "0.5GB", "description": "Code and text pre-training"},
+            {"name": "SynCoBERT-small", "size": "0.6GB", "description": "Syntax-guided code BERT"}
         ]
     },
     "low": { # 3-4GB
         "general": [
-            {"name": "Phi-1.5", "size": "2.8GB", "description": "Microsoft's efficient model", "parameters": "1.3B", "context": "2K"},
-            {"name": "Gemma-2B", "size": "1.4GB", "description": "Google's compact model", "parameters": "2B", "context": "8K"},
-            {"name": "OpenLLaMA-3B", "size": "2.1GB", "description": "Open source LLaMA", "parameters": "3B", "context": "2K"},
-            {"name": "StableLM-3B", "size": "2.2GB", "description": "Stability AI model", "parameters": "3B", "context": "4K"}
+            {"name": "Phi-1.5", "size": "2.8GB", "description": "Microsoft's efficient reasoning model"},
+            {"name": "Gemma-2B", "size": "1.4GB", "description": "Google's compact foundation model"},
+            {"name": "OpenLLaMA-3B", "size": "2.1GB", "description": "Open source LLaMA reproduction"},
+            {"name": "RedPajama-3B", "size": "2.0GB", "description": "Together AI's open model"},
+            {"name": "StableLM-3B", "size": "2.3GB", "description": "Stability AI's language model"},
+            {"name": "Pythia-2.8B", "size": "2.8GB", "description": "EleutherAI's training suite model"},
+            {"name": "GPT-Neo-2.7B", "size": "2.7GB", "description": "EleutherAI's open GPT model"},
+            {"name": "OPT-2.7B", "size": "2.7GB", "description": "Meta's open pre-trained transformer"},
+            {"name": "BLOOM-3B", "size": "3.0GB", "description": "BigScience's multilingual model"},
+            {"name": "GPT-J-6B", "size": "3.5GB", "description": "EleutherAI's 6B parameter model"},
+            {"name": "Cerebras-GPT-2.7B", "size": "2.7GB", "description": "Cerebras Systems' open model"},
+            {"name": "PaLM-2B", "size": "2.0GB", "description": "Google's Pathways Language Model"},
+            {"name": "LaMDA-2B", "size": "2.2GB", "description": "Google's Language Model for Dialogue"},
+            {"name": "FairSeq-2.7B", "size": "2.7GB", "description": "Facebook's sequence-to-sequence toolkit"},
+            {"name": "Megatron-2.5B", "size": "2.5GB", "description": "NVIDIA's transformer model"},
+            {"name": "GLM-2B", "size": "2.0GB", "description": "General Language Model pretraining"},
+            {"name": "CPM-2", "size": "2.6GB", "description": "Chinese Pre-trained Language Model"},
+            {"name": "mT5-small", "size": "1.2GB", "description": "Multilingual Text-to-Text Transfer"},
+            {"name": "ByT5-small", "size": "1.5GB", "description": "Byte-level Text-to-Text Transfer"},
+            {"name": "Switch-2B", "size": "2.0GB", "description": "Switch Transformer sparse model"},
+            {"name": "GPT-NeoX-2B", "size": "2.0GB", "description": "EleutherAI's NeoX architecture"},
+            {"name": "OPT-1.3B", "size": "1.3GB", "description": "Meta's 1.3B parameter model"},
+            {"name": "BLOOM-1B7", "size": "1.7GB", "description": "BigScience's 1.7B model"},
+            {"name": "Pythia-1.4B", "size": "1.4GB", "description": "EleutherAI's 1.4B model"},
+            {"name": "StableLM-Alpha-3B", "size": "2.2GB", "description": "Stability AI's alpha model"},
+            {"name": "OpenLLM-3B", "size": "2.1GB", "description": "Open-sourced language model"},
+            {"name": "Dolly-v1-6B", "size": "3.0GB", "description": "Databricks' instruction model"},
+            {"name": "GPT4All-J-6B", "size": "3.2GB", "description": "Nomic AI's assistant model"},
+            {"name": "Vicuna-3B", "size": "2.1GB", "description": "UC Berkeley's 3B chat model"},
+            {"name": "Alpaca-3B", "size": "2.0GB", "description": "Stanford's 3B instruction model"}
+        ],
         "code": [
             {"name": "CodeGen-2B", "size": "1.8GB", "description": "Salesforce code model", "parameters": "2B", "context": "2K"},