Jeff Myers II committed
Commit · d6bce68
Parent(s): 064abbf
Update space

Files changed:
- Gemma_Model.py +9 -5
- requirements.txt +1 -2
Gemma_Model.py
CHANGED
@@ -1,4 +1,8 @@
-from transformers import AutoTokenizer, BitsAndBytesConfig, Gemma3ForCausalLM
+from transformers import (
+    AutoTokenizer,
+    # BitsAndBytesConfig,
+    Gemma3ForCausalLM,
+)
 import torch
 import json
 import os
@@ -9,14 +13,14 @@ class GemmaLLM:
 
     def __init__(self):
         model_id = "google/gemma-3-1b-it"
-        quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+        # quantization_config = BitsAndBytesConfig(load_in_8bit=True)
 
         self.model = Gemma3ForCausalLM.from_pretrained(
             model_id,
             device_map="cpu",
-            quantization_config=quantization_config,
-            low_cpu_mem_usage=True,
-            torch_dtype=torch.float16,
+            # quantization_config=quantization_config,
+            # low_cpu_mem_usage=True,
+            # torch_dtype=torch.float16,
             token=os.environ.get("GEMMA_TOKEN"),
         ).eval()
 
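For context, the net effect of this change is that the 8-bit bitsandbytes quantization path is commented out and the checkpoint is loaded in its default dtype on CPU. Below is a minimal standalone sketch of the resulting load-and-generate flow, assuming GEMMA_TOKEN grants access to the gated checkpoint; the prompt, generation settings, and use of apply_chat_template are illustrative assumptions, not part of this repository.

# Sketch of the post-commit loading path: no BitsAndBytesConfig,
# default dtype, device_map="cpu", as in the updated Gemma_Model.py.
import os
import torch
from transformers import AutoTokenizer, Gemma3ForCausalLM

model_id = "google/gemma-3-1b-it"
token = os.environ.get("GEMMA_TOKEN")

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
model = Gemma3ForCausalLM.from_pretrained(
    model_id,
    device_map="cpu",
    token=token,
).eval()

# Illustrative usage only; prompt and max_new_tokens are assumptions.
messages = [{"role": "user", "content": "Summarize: Transformers run on CPU too."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
with torch.no_grad():
    output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))

Without the commented-out flags the weights load in float32, which roughly doubles memory use relative to float16 but removes the bitsandbytes dependency that the requirements.txt change below also drops.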
requirements.txt
CHANGED
@@ -5,5 +5,4 @@ newspaper3k==0.2.8
 torch==2.6.0
 transformers==4.50.0
 lxml_html_clean==0.4.1
-accelerate==1.5.2
-bitsandbytes==0.45.3
+accelerate==1.5.2