danielhanchen committed
Commit ae99a6b · verified · 1 Parent(s): 9c7b176

Upload folder using huggingface_hub
config.json CHANGED
@@ -25,81 +25,81 @@
 "multi_modal_projector",
 "merger",
 "modality_projection",
- "language_model.model.layers.22.self_attn",
- "language_model.model.layers.18.self_attn",
- "language_model.model.layers.8.mlp",
- "language_model.model.layers.11.mlp",
+ "language_model.model.layers.14.mlp",
+ "language_model.model.layers.2.self_attn",
+ "language_model.model.layers.12.mlp",
+ "language_model.model.layers.8.self_attn",
 "language_model.model.layers.6.self_attn",
- "language_model.model.layers.19.self_attn",
- "vision_tower.vision_model.encoder.layers.24.self_attn",
- "language_model.model.layers.14.self_attn",
- "language_model.model.layers.7.self_attn",
- "language_model.model.layers.17.mlp",
+ "language_model.model.layers.13.mlp",
+ "language_model.model.layers.1.mlp",
+ "language_model.model.layers.10.mlp",
+ "language_model.model.layers.16.mlp",
 "language_model.model.layers.5.self_attn",
+ "language_model.model.layers.2.mlp",
+ "language_model.model.layers.4.mlp",
+ "language_model.model.layers.5.mlp",
 "language_model.model.layers.9.mlp",
- "language_model.model.layers.3.mlp",
- "language_model.model.layers.3.self_attn",
+ "language_model.model.layers.7.self_attn",
+ "language_model.model.layers.6.mlp",
+ "language_model.model.layers.8.mlp",
+ "vision_tower.vision_model.encoder.layers.23.self_attn",
 "language_model.model.layers.7.mlp",
+ "language_model.model.layers.3.self_attn",
+ "language_model.model.layers.11.mlp",
+ "language_model.model.layers.3.mlp",
 "vision_tower.vision_model.encoder.layers.19.self_attn",
- "language_model.model.layers.15.mlp",
- "language_model.model.layers.11.self_attn",
- "vision_tower.vision_model.encoder.layers.20.self_attn",
- "language_model.model.layers.4.mlp",
- "vision_tower.vision_model.encoder.layers.26.self_attn",
- "vision_tower.vision_model.encoder.layers.22.self_attn",
- "language_model.model.layers.5.mlp",
- "language_model.model.layers.13.mlp",
- "language_model.model.layers.2.mlp",
- "vision_tower.vision_model.encoder.layers.19.mlp",
 "vision_tower.vision_model.encoder.layers.25.self_attn",
- "vision_tower.vision_model.encoder.layers.24.mlp",
- "vision_tower.vision_model.encoder.layers.23.self_attn",
- "language_model.model.layers.6.mlp",
- "vision_tower.vision_model.encoder.layers.25.mlp",
- "vision_tower.vision_model.encoder.layers.17.self_attn",
+ "vision_tower.vision_model.encoder.layers.22.self_attn",
 "vision_tower.vision_model.encoder.layers.21.self_attn",
- "vision_tower.vision_model.encoder.layers.16.self_attn",
- "vision_tower.vision_model.encoder.layers.21.mlp",
- "vision_tower.vision_model.encoder.layers.13.mlp",
- "vision_tower.vision_model.encoder.layers.22.mlp",
+ "vision_tower.vision_model.encoder.layers.18.self_attn",
 "vision_tower.vision_model.encoder.layers.17.mlp",
+ "vision_tower.vision_model.encoder.layers.24.self_attn",
+ "vision_tower.vision_model.encoder.layers.24.mlp",
+ "vision_tower.vision_model.encoder.layers.26.self_attn",
 "vision_tower.vision_model.encoder.layers.20.mlp",
- "vision_tower.vision_model.encoder.layers.14.self_attn",
+ "vision_tower.vision_model.encoder.layers.20.self_attn",
+ "vision_tower.vision_model.encoder.layers.23.mlp",
+ "vision_tower.vision_model.encoder.layers.19.mlp",
+ "vision_tower.vision_model.encoder.layers.15.self_attn",
+ "vision_tower.vision_model.encoder.layers.14.mlp",
+ "vision_tower.vision_model.encoder.layers.13.mlp",
+ "vision_tower.vision_model.encoder.layers.21.mlp",
 "vision_tower.vision_model.encoder.layers.16.mlp",
- "vision_tower.vision_model.encoder.layers.18.self_attn",
- "vision_tower.vision_model.encoder.layers.7.mlp",
- "vision_tower.vision_model.encoder.layers.9.self_attn",
+ "vision_tower.vision_model.encoder.layers.25.mlp",
 "vision_tower.vision_model.encoder.layers.15.mlp",
- "vision_tower.vision_model.encoder.layers.18.mlp",
- "vision_tower.vision_model.encoder.layers.12.mlp",
- "vision_tower.vision_model.encoder.layers.5.mlp",
+ "vision_tower.vision_model.encoder.layers.14.self_attn",
+ "vision_tower.vision_model.encoder.layers.16.self_attn",
+ "vision_tower.vision_model.encoder.layers.10.mlp",
+ "vision_tower.vision_model.encoder.layers.17.self_attn",
 "vision_tower.vision_model.encoder.layers.9.mlp",
- "vision_tower.vision_model.encoder.layers.14.mlp",
- "vision_tower.vision_model.encoder.layers.15.self_attn",
 "vision_tower.vision_model.encoder.layers.11.mlp",
- "vision_tower.vision_model.encoder.layers.10.mlp",
- "vision_tower.vision_model.encoder.layers.23.mlp",
- "vision_tower.vision_model.encoder.layers.8.mlp",
+ "vision_tower.vision_model.encoder.layers.12.mlp",
+ "vision_tower.vision_model.encoder.layers.10.self_attn",
+ "vision_tower.vision_model.encoder.layers.22.mlp",
 "vision_tower.vision_model.encoder.layers.13.self_attn",
- "vision_tower.vision_model.encoder.layers.6.self_attn",
- "vision_tower.vision_model.encoder.layers.5.self_attn",
- "vision_tower.vision_model.encoder.layers.1.self_attn",
- "vision_tower.vision_model.encoder.layers.3.self_attn",
- "vision_tower.vision_model.encoder.layers.4.mlp",
- "vision_tower.vision_model.encoder.layers.7.self_attn",
- "vision_tower.vision_model.encoder.layers.11.self_attn",
- "vision_tower.vision_model.encoder.layers.26.mlp",
- "vision_tower.vision_model.encoder.layers.4.self_attn",
+ "vision_tower.vision_model.encoder.layers.18.mlp",
 "vision_tower.vision_model.encoder.layers.12.self_attn",
- "vision_tower.vision_model.encoder.layers.8.self_attn",
- "vision_tower.vision_model.encoder.layers.10.self_attn",
- "vision_tower.vision_model.encoder.layers.3.mlp",
+ "vision_tower.vision_model.encoder.layers.7.mlp",
 "vision_tower.vision_model.encoder.layers.6.mlp",
- "vision_tower.vision_model.encoder.layers.2.mlp",
- "vision_tower.vision_model.encoder.layers.2.self_attn",
+ "vision_tower.vision_model.encoder.layers.8.self_attn",
+ "vision_tower.vision_model.encoder.layers.9.self_attn",
+ "vision_tower.vision_model.encoder.layers.4.mlp",
+ "vision_tower.vision_model.encoder.layers.5.mlp",
+ "vision_tower.vision_model.encoder.layers.8.mlp",
 "vision_tower.vision_model.encoder.layers.1.mlp",
+ "vision_tower.vision_model.encoder.layers.6.self_attn",
+ "vision_tower.vision_model.encoder.layers.4.self_attn",
+ "vision_tower.vision_model.encoder.layers.11.self_attn",
+ "vision_tower.vision_model.encoder.layers.7.self_attn",
 "vision_tower.vision_model.encoder.layers.0.self_attn",
- "vision_tower.vision_model.encoder.layers.0.mlp"
+ "vision_tower.vision_model.encoder.layers.3.mlp",
+ "vision_tower.vision_model.encoder.layers.1.self_attn",
+ "vision_tower.vision_model.encoder.layers.2.mlp",
+ "vision_tower.vision_model.encoder.layers.5.self_attn",
+ "vision_tower.vision_model.encoder.layers.3.self_attn",
+ "vision_tower.vision_model.encoder.layers.0.mlp",
+ "vision_tower.vision_model.encoder.layers.26.mlp",
+ "vision_tower.vision_model.encoder.layers.2.self_attn"
 ],
 "llm_int8_threshold": 6.0,
 "load_in_4bit": true,
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:e329a638c49d6f8e6d8649198401c24ca2e075ab32ac3ff0613de933b64abdf6
- size 4953233186
+ oid sha256:7476392ff2404c2fdf0d9640f4564a0e037a4a21ef97068bd7b4bc0334980963
+ size 4955527760
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:c73460988ff2f0faf63d0be2e39271cce9b84658eb49cd6a4e1b7e66d5363e32
- size 4978202196
+ oid sha256:09fed786bbe92e4eb31e66f2f2a8032fba3437151018914ea015fd79df861e98
+ size 4975309932
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:7850b744104a7591a92439ed18e2e20499d23cef4616c5ba24bc68c38a99ff71
- size 2258633570
+ oid sha256:1aac81fb4a479e51c4194e403cec2cba634e4018cb70da4cc0f2b0e493ad816a
+ size 2836985436
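Each `*.safetensors` entry above is a Git LFS pointer, so the diff only shows the new `oid sha256:` digest and byte `size` (note shard 3 growing from ~2.26 GB to ~2.84 GB as tensors are redistributed). A small standard-library sketch for checking a downloaded shard against its pointer; the local file path is illustrative:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size from its LFS pointer."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    h = hashlib.sha256()
    with p.open("rb") as f:
        # Hash in 1 MiB chunks so multi-GB shards don't need to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid

# Values taken from the new pointer for model-00001-of-00003.safetensors above.
ok = verify_lfs_pointer(
    "model-00001-of-00003.safetensors",
    "7476392ff2404c2fdf0d9640f4564a0e037a4a21ef97068bd7b4bc0334980963",
    4955527760,
)
print("shard ok:", ok)
```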
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
 "metadata": {
- "total_size": 12189748992
 },
 "weight_map": {
 "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
@@ -54,23 +54,8 @@
 "language_model.model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.down_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.down_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.down_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.down_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.gate_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.gate_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.up_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.up_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.up_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.up_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.1.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
@@ -102,23 +102,8 @@
 "language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.10.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -157,29 +127,34 @@
 "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.12.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -244,36 +219,56 @@
 "language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -305,23 +300,8 @@
 "language_model.model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.16.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -353,8 +333,23 @@
 "language_model.model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -408,10 +403,30 @@
 "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -436,10 +451,30 @@
 "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
@@ -449,30 +484,10 @@
 "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
- "language_model.model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
 "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -593,32 +608,52 @@
 "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -645,198 +680,198 @@
 "language_model.model.layers.23.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
 "language_model.model.layers.23.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -844,12 +879,12 @@
 "language_model.model.layers.28.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
 "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
 "language_model.model.layers.28.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
@@ -859,32 +894,32 @@
859
  "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
860
  "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
861
  "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
862
- "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
863
- "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
864
- "language_model.model.layers.28.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
865
- "language_model.model.layers.28.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
866
- "language_model.model.layers.28.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
867
- "language_model.model.layers.28.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
868
- "language_model.model.layers.28.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
869
- "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
870
- "language_model.model.layers.28.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
871
- "language_model.model.layers.28.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
872
- "language_model.model.layers.28.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
873
- "language_model.model.layers.28.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
874
- "language_model.model.layers.28.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
875
- "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
876
- "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
877
- "language_model.model.layers.28.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
878
- "language_model.model.layers.28.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
879
- "language_model.model.layers.28.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
880
- "language_model.model.layers.28.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
881
- "language_model.model.layers.28.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
882
- "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
883
- "language_model.model.layers.28.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
884
- "language_model.model.layers.28.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
885
- "language_model.model.layers.28.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
886
- "language_model.model.layers.28.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
887
- "language_model.model.layers.28.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
888
  "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
889
  "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
890
  "language_model.model.layers.29.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -1843,13 +1878,13 @@
1843
  "language_model.model.layers.47.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
1844
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
1845
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
1846
- "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
1847
- "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
1848
  "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
1849
  "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
1850
- "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
1851
- "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
1852
- "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
1853
  "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
1854
  "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
1855
  "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
@@ -1858,17 +1893,17 @@
1858
  "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
1859
  "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
1860
  "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1861
- "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
1862
  "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
1863
  "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
1864
  "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1865
  "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1866
- "language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
1867
- "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
1868
- "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
1869
- "language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
1870
- "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
1871
- "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
1872
  "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
1873
  "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1874
  "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -1891,30 +1926,10 @@
1891
  "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1892
  "language_model.model.layers.8.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
1893
  "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
1894
- "language_model.model.layers.8.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
1895
- "language_model.model.layers.8.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1896
- "language_model.model.layers.8.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1897
- "language_model.model.layers.8.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1898
- "language_model.model.layers.8.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1899
  "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
1900
- "language_model.model.layers.8.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
1901
- "language_model.model.layers.8.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1902
- "language_model.model.layers.8.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1903
- "language_model.model.layers.8.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1904
- "language_model.model.layers.8.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1905
  "language_model.model.layers.8.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
1906
  "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
1907
- "language_model.model.layers.8.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
1908
- "language_model.model.layers.8.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1909
- "language_model.model.layers.8.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1910
- "language_model.model.layers.8.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1911
- "language_model.model.layers.8.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1912
  "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
1913
- "language_model.model.layers.8.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
1914
- "language_model.model.layers.8.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1915
- "language_model.model.layers.8.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1916
- "language_model.model.layers.8.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1917
- "language_model.model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1918
  "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
1919
  "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1920
  "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
 
  {
  "metadata": {
+ "total_size": 12767500920
  },
  "weight_map": {
  "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.11.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.15.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.17.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.18.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.19.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.23.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
869
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
870
+ "language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
871
+ "language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
872
+ "language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
873
+ "language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
874
+ "language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
875
  "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
876
  "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
877
  "language_model.model.layers.28.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
 
879
  "language_model.model.layers.28.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
880
  "language_model.model.layers.28.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
881
  "language_model.model.layers.28.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
882
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
883
+ "language_model.model.layers.28.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
884
+ "language_model.model.layers.28.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
885
+ "language_model.model.layers.28.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
886
+ "language_model.model.layers.28.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
887
+ "language_model.model.layers.28.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
888
  "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
889
  "language_model.model.layers.28.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
890
  "language_model.model.layers.28.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
 
894
  "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
895
  "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
896
  "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
897
+ "language_model.model.layers.28.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
898
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
899
+ "language_model.model.layers.28.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
900
+ "language_model.model.layers.28.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
901
+ "language_model.model.layers.28.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
902
+ "language_model.model.layers.28.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
903
+ "language_model.model.layers.28.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
904
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
905
+ "language_model.model.layers.28.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
906
+ "language_model.model.layers.28.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
907
+ "language_model.model.layers.28.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
908
+ "language_model.model.layers.28.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
909
+ "language_model.model.layers.28.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
910
+ "language_model.model.layers.28.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
911
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
912
+ "language_model.model.layers.28.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
913
+ "language_model.model.layers.28.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
914
+ "language_model.model.layers.28.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
915
+ "language_model.model.layers.28.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
916
+ "language_model.model.layers.28.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
917
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
918
+ "language_model.model.layers.28.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
919
+ "language_model.model.layers.28.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
920
+ "language_model.model.layers.28.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
921
+ "language_model.model.layers.28.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
922
+ "language_model.model.layers.28.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
923
  "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
924
  "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
925
  "language_model.model.layers.29.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
 
  "language_model.model.layers.47.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
1879
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
1880
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
1881
+ "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00003.safetensors",
1882
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1883
  "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
1884
  "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
1885
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
1886
+ "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1887
+ "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1888
  "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
1889
  "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
1890
  "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
 
1893
  "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
1894
  "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
1895
  "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1896
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
1897
  "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
1898
  "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
1899
  "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1900
  "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1901
+ "language_model.model.layers.6.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
1902
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
1903
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
1904
+ "language_model.model.layers.6.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
1905
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
1906
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
1907
  "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
1908
  "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1909
  "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
tokenizer_config.json CHANGED
@@ -1107,7 +1107,7 @@
  "special": false
  },
  "138": {
- "content": "▁▁",
+ "content": "\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1115,7 +1115,7 @@
  "special": false
  },
  "139": {
- "content": "▁▁▁",
+ "content": "\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1123,7 +1123,7 @@
  "special": false
  },
  "140": {
- "content": "▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1131,7 +1131,7 @@
  "special": false
  },
  "141": {
- "content": "▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1139,7 +1139,7 @@
  "special": false
  },
  "142": {
- "content": "▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1147,7 +1147,7 @@
  "special": false
  },
  "143": {
- "content": "▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1155,7 +1155,7 @@
  "special": false
  },
  "144": {
- "content": "▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1163,7 +1163,7 @@
  "special": false
  },
  "145": {
- "content": "▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1171,7 +1171,7 @@
  "special": false
  },
  "146": {
- "content": "▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1179,7 +1179,7 @@
  "special": false
  },
  "147": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1187,7 +1187,7 @@
  "special": false
  },
  "148": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1195,7 +1195,7 @@
  "special": false
  },
  "149": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1203,7 +1203,7 @@
  "special": false
  },
  "150": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1211,7 +1211,7 @@
  "special": false
  },
  "151": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1219,7 +1219,7 @@
  "special": false
  },
  "152": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1227,7 +1227,7 @@
  "special": false
  },
  "153": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1235,7 +1235,7 @@
  "special": false
  },
  "154": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1243,7 +1243,7 @@
  "special": false
  },
  "155": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1251,7 +1251,7 @@
  "special": false
  },
  "156": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1259,7 +1259,7 @@
  "special": false
  },
  "157": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1267,7 +1267,7 @@
  "special": false
  },
  "158": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1275,7 +1275,7 @@
  "special": false
  },
  "159": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1283,7 +1283,7 @@
  "special": false
  },
  "160": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1291,7 +1291,7 @@
  "special": false
  },
  "161": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1299,7 +1299,7 @@
  "special": false
  },
  "162": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1307,7 +1307,7 @@
  "special": false
  },
  "163": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1315,7 +1315,7 @@
  "special": false
  },
  "164": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1323,7 +1323,7 @@
  "special": false
  },
  "165": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1331,7 +1331,7 @@
  "special": false
  },
  "166": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -1339,7 +1339,7 @@
  "special": false
  },
  "167": {
- "content": "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "content": "\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581\u2581",
  "lstrip": false,
  "normalized": false,
  "rstrip": false,
@@ -51344,4 +51344,4 @@
  "tokenizer_class": "GemmaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
- }
+ }
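
Note: every tokenizer_config.json hunk above is a pure re-serialization. The literal "▁" characters (U+2581, the SentencePiece whitespace marker) are rewritten as their "\u2581" JSON escapes; after decoding, the token strings are identical, so tokenization behavior is unchanged. A quick check, assuming nothing beyond the Python standard library:

import json

# Both serializations decode to the same two-character token string.
assert json.loads('"\\u2581\\u2581"') == json.loads('"▁▁"') == "\u2581\u2581"
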