danielhanchen committed (verified)
Commit 93659ba · Parent(s): d82bd7a

Add files using upload-large-folder tool
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+{%- if messages[0]['content'] is string -%}
+{%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+{%- else -%}
+{%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+{%- endif -%}
+{%- set loop_messages = messages[1:] -%}
+{%- else -%}
+{%- set first_user_prefix = "" -%}
+{%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+{%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+{{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+{%- endif -%}
+{%- if (message['role'] == 'assistant') -%}
+{%- set role = "model" -%}
+{%- else -%}
+{%- set role = message['role'] -%}
+{%- endif -%}
+{{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+{%- if message['content'] is string -%}
+{{ message['content'] | trim }}
+{%- elif message['content'] is iterable -%}
+{%- for item in message['content'] -%}
+{%- if item['type'] == 'image' -%}
+{{ '<start_of_image>' }}
+{%- elif item['type'] == 'text' -%}
+{{ item['text'] | trim }}
+{%- endif -%}
+{%- endfor -%}
+{%- else -%}
+{{ raise_exception("Invalid content type") }}
+{%- endif -%}
+{{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+{{'<start_of_turn>model
+'}}
+{%- endif -%}
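For illustration only (not part of this commit): a minimal sketch of how the added template renders through the standard transformers API. The repo id below is a placeholder, and the bos_token comes from the tokenizer.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-3-bnb-4bit")  # placeholder repo id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Per the template above, the system text is folded into the first user turn:
# <bos><start_of_turn>user
# You are a helpful assistant.
#
# Hello!<end_of_turn>
# <start_of_turn>model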
config.json CHANGED
@@ -25,81 +25,81 @@
  "multi_modal_projector",
  "merger",
  "modality_projection",
- "language_model.model.layers.14.mlp",
- "language_model.model.layers.2.self_attn",
- "language_model.model.layers.12.mlp",
- "language_model.model.layers.8.self_attn",
+ "language_model.model.layers.22.self_attn",
+ "language_model.model.layers.18.self_attn",
+ "language_model.model.layers.8.mlp",
+ "language_model.model.layers.11.mlp",
  "language_model.model.layers.6.self_attn",
- "language_model.model.layers.13.mlp",
- "language_model.model.layers.1.mlp",
- "language_model.model.layers.10.mlp",
- "language_model.model.layers.16.mlp",
+ "language_model.model.layers.19.self_attn",
+ "vision_tower.vision_model.encoder.layers.24.self_attn",
+ "language_model.model.layers.14.self_attn",
+ "language_model.model.layers.7.self_attn",
+ "language_model.model.layers.17.mlp",
  "language_model.model.layers.5.self_attn",
- "language_model.model.layers.2.mlp",
- "language_model.model.layers.4.mlp",
- "language_model.model.layers.5.mlp",
  "language_model.model.layers.9.mlp",
- "language_model.model.layers.7.self_attn",
- "language_model.model.layers.6.mlp",
- "language_model.model.layers.8.mlp",
- "vision_tower.vision_model.encoder.layers.23.self_attn",
- "language_model.model.layers.7.mlp",
- "language_model.model.layers.3.self_attn",
- "language_model.model.layers.11.mlp",
  "language_model.model.layers.3.mlp",
+ "language_model.model.layers.3.self_attn",
+ "language_model.model.layers.7.mlp",
  "vision_tower.vision_model.encoder.layers.19.self_attn",
- "vision_tower.vision_model.encoder.layers.25.self_attn",
- "vision_tower.vision_model.encoder.layers.22.self_attn",
- "vision_tower.vision_model.encoder.layers.21.self_attn",
- "vision_tower.vision_model.encoder.layers.18.self_attn",
- "vision_tower.vision_model.encoder.layers.17.mlp",
- "vision_tower.vision_model.encoder.layers.24.self_attn",
- "vision_tower.vision_model.encoder.layers.24.mlp",
- "vision_tower.vision_model.encoder.layers.26.self_attn",
- "vision_tower.vision_model.encoder.layers.20.mlp",
+ "language_model.model.layers.15.mlp",
+ "language_model.model.layers.11.self_attn",
  "vision_tower.vision_model.encoder.layers.20.self_attn",
- "vision_tower.vision_model.encoder.layers.23.mlp",
+ "language_model.model.layers.4.mlp",
+ "vision_tower.vision_model.encoder.layers.26.self_attn",
+ "vision_tower.vision_model.encoder.layers.22.self_attn",
+ "language_model.model.layers.5.mlp",
+ "language_model.model.layers.13.mlp",
+ "language_model.model.layers.2.mlp",
  "vision_tower.vision_model.encoder.layers.19.mlp",
- "vision_tower.vision_model.encoder.layers.15.self_attn",
- "vision_tower.vision_model.encoder.layers.14.mlp",
- "vision_tower.vision_model.encoder.layers.13.mlp",
- "vision_tower.vision_model.encoder.layers.21.mlp",
- "vision_tower.vision_model.encoder.layers.16.mlp",
+ "vision_tower.vision_model.encoder.layers.25.self_attn",
+ "vision_tower.vision_model.encoder.layers.24.mlp",
+ "vision_tower.vision_model.encoder.layers.23.self_attn",
+ "language_model.model.layers.6.mlp",
  "vision_tower.vision_model.encoder.layers.25.mlp",
- "vision_tower.vision_model.encoder.layers.15.mlp",
- "vision_tower.vision_model.encoder.layers.14.self_attn",
- "vision_tower.vision_model.encoder.layers.16.self_attn",
- "vision_tower.vision_model.encoder.layers.10.mlp",
  "vision_tower.vision_model.encoder.layers.17.self_attn",
- "vision_tower.vision_model.encoder.layers.9.mlp",
- "vision_tower.vision_model.encoder.layers.11.mlp",
- "vision_tower.vision_model.encoder.layers.12.mlp",
- "vision_tower.vision_model.encoder.layers.10.self_attn",
+ "vision_tower.vision_model.encoder.layers.21.self_attn",
+ "vision_tower.vision_model.encoder.layers.16.self_attn",
+ "vision_tower.vision_model.encoder.layers.21.mlp",
+ "vision_tower.vision_model.encoder.layers.13.mlp",
  "vision_tower.vision_model.encoder.layers.22.mlp",
- "vision_tower.vision_model.encoder.layers.13.self_attn",
- "vision_tower.vision_model.encoder.layers.18.mlp",
- "vision_tower.vision_model.encoder.layers.12.self_attn",
+ "vision_tower.vision_model.encoder.layers.17.mlp",
+ "vision_tower.vision_model.encoder.layers.20.mlp",
+ "vision_tower.vision_model.encoder.layers.14.self_attn",
+ "vision_tower.vision_model.encoder.layers.16.mlp",
+ "vision_tower.vision_model.encoder.layers.18.self_attn",
  "vision_tower.vision_model.encoder.layers.7.mlp",
- "vision_tower.vision_model.encoder.layers.6.mlp",
- "vision_tower.vision_model.encoder.layers.8.self_attn",
  "vision_tower.vision_model.encoder.layers.9.self_attn",
- "vision_tower.vision_model.encoder.layers.4.mlp",
+ "vision_tower.vision_model.encoder.layers.15.mlp",
+ "vision_tower.vision_model.encoder.layers.18.mlp",
+ "vision_tower.vision_model.encoder.layers.12.mlp",
  "vision_tower.vision_model.encoder.layers.5.mlp",
+ "vision_tower.vision_model.encoder.layers.9.mlp",
+ "vision_tower.vision_model.encoder.layers.14.mlp",
+ "vision_tower.vision_model.encoder.layers.15.self_attn",
+ "vision_tower.vision_model.encoder.layers.11.mlp",
+ "vision_tower.vision_model.encoder.layers.10.mlp",
+ "vision_tower.vision_model.encoder.layers.23.mlp",
  "vision_tower.vision_model.encoder.layers.8.mlp",
- "vision_tower.vision_model.encoder.layers.1.mlp",
+ "vision_tower.vision_model.encoder.layers.13.self_attn",
  "vision_tower.vision_model.encoder.layers.6.self_attn",
- "vision_tower.vision_model.encoder.layers.4.self_attn",
- "vision_tower.vision_model.encoder.layers.11.self_attn",
- "vision_tower.vision_model.encoder.layers.7.self_attn",
- "vision_tower.vision_model.encoder.layers.0.self_attn",
- "vision_tower.vision_model.encoder.layers.3.mlp",
- "vision_tower.vision_model.encoder.layers.1.self_attn",
- "vision_tower.vision_model.encoder.layers.2.mlp",
  "vision_tower.vision_model.encoder.layers.5.self_attn",
+ "vision_tower.vision_model.encoder.layers.1.self_attn",
  "vision_tower.vision_model.encoder.layers.3.self_attn",
- "vision_tower.vision_model.encoder.layers.0.mlp",
+ "vision_tower.vision_model.encoder.layers.4.mlp",
+ "vision_tower.vision_model.encoder.layers.7.self_attn",
+ "vision_tower.vision_model.encoder.layers.11.self_attn",
  "vision_tower.vision_model.encoder.layers.26.mlp",
- "vision_tower.vision_model.encoder.layers.2.self_attn"
+ "vision_tower.vision_model.encoder.layers.4.self_attn",
+ "vision_tower.vision_model.encoder.layers.12.self_attn",
+ "vision_tower.vision_model.encoder.layers.8.self_attn",
+ "vision_tower.vision_model.encoder.layers.10.self_attn",
+ "vision_tower.vision_model.encoder.layers.3.mlp",
+ "vision_tower.vision_model.encoder.layers.6.mlp",
+ "vision_tower.vision_model.encoder.layers.2.mlp",
+ "vision_tower.vision_model.encoder.layers.2.self_attn",
+ "vision_tower.vision_model.encoder.layers.1.mlp",
+ "vision_tower.vision_model.encoder.layers.0.self_attn",
+ "vision_tower.vision_model.encoder.layers.0.mlp"
  ],
  "llm_int8_threshold": 6.0,
  "load_in_4bit": true,
@@ -137,7 +137,7 @@
  "vocab_size": 262208
  },
  "torch_dtype": "bfloat16",
- "transformers_version": "4.51.0",
+ "transformers_version": "4.52.0.dev0",
  "unsloth_fixed": true,
  "vision_config": {
  "attention_dropout": 0.0,
generation_config.json CHANGED
@@ -9,5 +9,5 @@
  "pad_token_id": 0,
  "top_k": 64,
  "top_p": 0.95,
- "transformers_version": "4.51.0"
+ "transformers_version": "4.52.0.dev0"
  }
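Continuing the illustrative sketch above: the top_k and top_p values in generation_config.json are picked up by generate() automatically when sampling; they are spelled out here only to show where they come from.

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    top_k=64,    # default from generation_config.json
    top_p=0.95,  # default from generation_config.json
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))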
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7476392ff2404c2fdf0d9640f4564a0e037a4a21ef97068bd7b4bc0334980963
- size 4955527760
+ oid sha256:e329a638c49d6f8e6d8649198401c24ca2e075ab32ac3ff0613de933b64abdf6
+ size 4953233186
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09fed786bbe92e4eb31e66f2f2a8032fba3437151018914ea015fd79df861e98
- size 4975309932
+ oid sha256:c73460988ff2f0faf63d0be2e39271cce9b84658eb49cd6a4e1b7e66d5363e32
+ size 4978202196
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1aac81fb4a479e51c4194e403cec2cba634e4018cb70da4cc0f2b0e493ad816a
- size 2836985436
+ oid sha256:7850b744104a7591a92439ed18e2e20499d23cef4616c5ba24bc68c38a99ff71
+ size 2258633570
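The three entries above are Git LFS pointers: each records the sha256 (oid) and byte size of the real shard. A small illustrative helper (not part of the commit) for checking a downloaded shard against its pointer:

import hashlib
import os

def verify_lfs_object(path, expected_sha256, expected_size):
    """Return True if the file matches the size and sha256 from its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_sha256

# New pointer values for model-00003-of-00003.safetensors, from the diff above:
print(verify_lfs_object(
    "model-00003-of-00003.safetensors",
    "7850b744104a7591a92439ed18e2e20499d23cef4616c5ba24bc68c38a99ff71",
    2258633570,
))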
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
  {
  "metadata": {
- "total_size": 12767500920
  },
  "weight_map": {
  "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
@@ -54,8 +54,23 @@
  "language_model.model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
@@ -87,8 +102,23 @@
  "language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -127,34 +157,29 @@
  "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.11.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -219,56 +244,36 @@
  "language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.14.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.15.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -300,8 +305,23 @@
  "language_model.model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -333,23 +353,8 @@
  "language_model.model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.17.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
@@ -403,30 +408,10 @@
  "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.18.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -451,30 +436,10 @@
  "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.19.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
@@ -484,10 +449,30 @@
  "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -608,52 +593,32 @@
  "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
- "language_model.model.layers.22.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
@@ -680,198 +645,198 @@
  "language_model.model.layers.23.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
- "language_model.model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
856
- "language_model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
857
- "language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
858
- "language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
859
- "language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
860
- "language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
861
- "language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
862
- "language_model.model.layers.27.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
863
- "language_model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
864
- "language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
865
- "language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
866
- "language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
867
- "language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
868
- "language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
869
- "language_model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
870
- "language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
871
- "language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
872
- "language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
873
- "language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
874
- "language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
875
  "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
876
  "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
877
  "language_model.model.layers.28.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -879,12 +844,12 @@
  "language_model.model.layers.28.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
@@ -894,32 +859,32 @@
  "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_norm.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight.absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
- "language_model.model.layers.28.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
  "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
  "language_model.model.layers.29.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
@@ -1878,13 +1843,13 @@
  "language_model.model.layers.47.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
- "language_model.model.layers.5.input_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.5.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
- "language_model.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
@@ -1893,17 +1858,17 @@
  "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
- "language_model.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -1926,10 +1891,30 @@
  "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
 
  {
  "metadata": {
+ "total_size": 12189748992
  },
  "weight_map": {
  "language_model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.0.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.1.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.1.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.1.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.10.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.10.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.11.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.12.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.12.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.13.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.14.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.15.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.15.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.16.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.16.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.16.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.17.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.18.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.19.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
 
  "language_model.model.layers.2.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.nested_absmax": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.nested_quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.quant_map": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.2.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00001-of-00003.safetensors",
  "language_model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.20.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.22.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.23.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
 
  "language_model.model.layers.23.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
  "language_model.model.layers.23.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.24.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.25.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.26.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.input_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.mlp.up_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.27.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
834
+ "language_model.model.layers.27.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
835
+ "language_model.model.layers.27.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
836
+ "language_model.model.layers.27.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
837
+ "language_model.model.layers.27.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
838
+ "language_model.model.layers.27.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
839
+ "language_model.model.layers.27.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
840
  "language_model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
841
  "language_model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
842
  "language_model.model.layers.28.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
 
844
  "language_model.model.layers.28.mlp.down_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
845
  "language_model.model.layers.28.mlp.down_proj.weight.quant_map": "model-00003-of-00003.safetensors",
846
  "language_model.model.layers.28.mlp.down_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
847
+ "language_model.model.layers.28.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
848
+ "language_model.model.layers.28.mlp.gate_proj.weight.absmax": "model-00002-of-00003.safetensors",
849
+ "language_model.model.layers.28.mlp.gate_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
850
+ "language_model.model.layers.28.mlp.gate_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
851
+ "language_model.model.layers.28.mlp.gate_proj.weight.quant_map": "model-00002-of-00003.safetensors",
852
+ "language_model.model.layers.28.mlp.gate_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
853
  "language_model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
854
  "language_model.model.layers.28.mlp.up_proj.weight.absmax": "model-00003-of-00003.safetensors",
855
  "language_model.model.layers.28.mlp.up_proj.weight.nested_absmax": "model-00003-of-00003.safetensors",
 
859
  "language_model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
860
  "language_model.model.layers.28.post_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
861
  "language_model.model.layers.28.pre_feedforward_layernorm.weight": "model-00003-of-00003.safetensors",
862
+ "language_model.model.layers.28.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
863
+ "language_model.model.layers.28.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
864
+ "language_model.model.layers.28.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
865
+ "language_model.model.layers.28.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
866
+ "language_model.model.layers.28.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
867
+ "language_model.model.layers.28.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
868
+ "language_model.model.layers.28.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
869
+ "language_model.model.layers.28.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
870
+ "language_model.model.layers.28.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
871
+ "language_model.model.layers.28.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
872
+ "language_model.model.layers.28.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
873
+ "language_model.model.layers.28.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
874
+ "language_model.model.layers.28.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
875
+ "language_model.model.layers.28.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
876
+ "language_model.model.layers.28.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
877
+ "language_model.model.layers.28.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
878
+ "language_model.model.layers.28.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
879
+ "language_model.model.layers.28.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
880
+ "language_model.model.layers.28.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
881
+ "language_model.model.layers.28.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
882
+ "language_model.model.layers.28.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
883
+ "language_model.model.layers.28.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
884
+ "language_model.model.layers.28.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
885
+ "language_model.model.layers.28.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
886
+ "language_model.model.layers.28.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
887
+ "language_model.model.layers.28.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
888
  "language_model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
889
  "language_model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
890
  "language_model.model.layers.29.mlp.down_proj.weight.absmax": "model-00003-of-00003.safetensors",
 
  "language_model.model.layers.47.self_attn.v_proj.weight.nested_quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_map": "model-00003-of-00003.safetensors",
  "language_model.model.layers.47.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00003-of-00003.safetensors",
+ "language_model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.5.post_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.5.pre_feedforward_layernorm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",

  "language_model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.6.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.6.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.post_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.6.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.k_norm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.q_norm.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+ "language_model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
  "language_model.model.layers.7.input_layernorm.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.7.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
  "language_model.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",

  "language_model.model.layers.8.pre_feedforward_layernorm.weight": "model-00002-of-00003.safetensors",
1892
  "language_model.model.layers.8.self_attn.k_norm.weight": "model-00002-of-00003.safetensors",
1893
  "language_model.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
1894
+ "language_model.model.layers.8.self_attn.k_proj.weight.absmax": "model-00002-of-00003.safetensors",
1895
+ "language_model.model.layers.8.self_attn.k_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1896
+ "language_model.model.layers.8.self_attn.k_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1897
+ "language_model.model.layers.8.self_attn.k_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1898
+ "language_model.model.layers.8.self_attn.k_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1899
  "language_model.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
1900
+ "language_model.model.layers.8.self_attn.o_proj.weight.absmax": "model-00002-of-00003.safetensors",
1901
+ "language_model.model.layers.8.self_attn.o_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1902
+ "language_model.model.layers.8.self_attn.o_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1903
+ "language_model.model.layers.8.self_attn.o_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1904
+ "language_model.model.layers.8.self_attn.o_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1905
  "language_model.model.layers.8.self_attn.q_norm.weight": "model-00002-of-00003.safetensors",
1906
  "language_model.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
1907
+ "language_model.model.layers.8.self_attn.q_proj.weight.absmax": "model-00002-of-00003.safetensors",
1908
+ "language_model.model.layers.8.self_attn.q_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1909
+ "language_model.model.layers.8.self_attn.q_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1910
+ "language_model.model.layers.8.self_attn.q_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1911
+ "language_model.model.layers.8.self_attn.q_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1912
  "language_model.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
1913
+ "language_model.model.layers.8.self_attn.v_proj.weight.absmax": "model-00002-of-00003.safetensors",
1914
+ "language_model.model.layers.8.self_attn.v_proj.weight.nested_absmax": "model-00002-of-00003.safetensors",
1915
+ "language_model.model.layers.8.self_attn.v_proj.weight.nested_quant_map": "model-00002-of-00003.safetensors",
1916
+ "language_model.model.layers.8.self_attn.v_proj.weight.quant_map": "model-00002-of-00003.safetensors",
1917
+ "language_model.model.layers.8.self_attn.v_proj.weight.quant_state.bitsandbytes__nf4": "model-00002-of-00003.safetensors",
1918
  "language_model.model.layers.9.input_layernorm.weight": "model-00002-of-00003.safetensors",
1919
  "language_model.model.layers.9.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
1920
  "language_model.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
tokenizer_config.json CHANGED
@@ -51325,7 +51325,6 @@
  },
  "boi_token": "<start_of_image>",
  "bos_token": "<bos>",
- "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
  "clean_up_tokenization_spaces": false,
  "eoi_token": "<end_of_image>",
  "eos_token": "<end_of_turn>",
 