Doctor-Shotgun committed
Commit d532723 · verified · Parent: 9aa7243

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +1 -0
  2. config.json +42 -0
  3. configuration_qwen3_shared_moe.py +230 -0
  4. generation_config.json +4 -0
  5. merges.txt +0 -0
  6. model-00001-of-00095.safetensors +3 -0
  7. model-00002-of-00095.safetensors +3 -0
  8. model-00003-of-00095.safetensors +3 -0
  9. model-00004-of-00095.safetensors +3 -0
  10. model-00005-of-00095.safetensors +3 -0
  11. model-00006-of-00095.safetensors +3 -0
  12. model-00007-of-00095.safetensors +3 -0
  13. model-00008-of-00095.safetensors +3 -0
  14. model-00009-of-00095.safetensors +3 -0
  15. model-00010-of-00095.safetensors +3 -0
  16. model-00011-of-00095.safetensors +3 -0
  17. model-00012-of-00095.safetensors +3 -0
  18. model-00013-of-00095.safetensors +3 -0
  19. model-00014-of-00095.safetensors +3 -0
  20. model-00015-of-00095.safetensors +3 -0
  21. model-00016-of-00095.safetensors +3 -0
  22. model-00017-of-00095.safetensors +3 -0
  23. model-00018-of-00095.safetensors +3 -0
  24. model-00019-of-00095.safetensors +3 -0
  25. model-00020-of-00095.safetensors +3 -0
  26. model-00021-of-00095.safetensors +3 -0
  27. model-00022-of-00095.safetensors +3 -0
  28. model-00023-of-00095.safetensors +3 -0
  29. model-00024-of-00095.safetensors +3 -0
  30. model-00025-of-00095.safetensors +3 -0
  31. model-00026-of-00095.safetensors +3 -0
  32. model-00027-of-00095.safetensors +3 -0
  33. model-00028-of-00095.safetensors +3 -0
  34. model-00029-of-00095.safetensors +3 -0
  35. model-00030-of-00095.safetensors +3 -0
  36. model-00031-of-00095.safetensors +3 -0
  37. model-00032-of-00095.safetensors +3 -0
  38. model-00033-of-00095.safetensors +3 -0
  39. model-00034-of-00095.safetensors +3 -0
  40. model-00035-of-00095.safetensors +3 -0
  41. model-00036-of-00095.safetensors +3 -0
  42. model-00037-of-00095.safetensors +3 -0
  43. model-00038-of-00095.safetensors +3 -0
  44. model-00039-of-00095.safetensors +3 -0
  45. model-00040-of-00095.safetensors +3 -0
  46. model-00041-of-00095.safetensors +3 -0
  47. model-00042-of-00095.safetensors +3 -0
  48. model-00043-of-00095.safetensors +3 -0
  49. model-00044-of-00095.safetensors +3 -0
  50. model-00045-of-00095.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "architectures": [
+     "Qwen3SharedMoeForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_qwen3_shared_moe.Qwen3SharedMoeConfig",
+     "AutoModel": "modeling_qwen3_shared_moe.Qwen3SharedMoeModel",
+     "AutoModelForCausalLM": "modeling_qwen3_shared_moe.Qwen3SharedMoeForCausalLM"
+   },
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "decoder_sparse_step": 1,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 12288,
+   "max_position_embeddings": 262144,
+   "max_window_layers": 94,
+   "mlp_only_layers": [],
+   "model_type": "qwen3_shared_moe",
+   "moe_intermediate_size": 1536,
+   "norm_topk_prob": true,
+   "num_attention_heads": 64,
+   "num_experts": 128,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 94,
+   "num_key_value_heads": 4,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 5000000,
+   "router_aux_loss_coef": 0.001,
+   "shared_expert_intermediate_size": null,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.3",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
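
Because auto_map routes AutoConfig and AutoModelForCausalLM to the custom classes added in this commit, loading the checkpoint requires trust_remote_code=True. A minimal loading sketch, assuming a placeholder repo id (substitute the actual Hub path):

from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "user/qwen3-shared-moe-model"  # placeholder, not the real repo path

# auto_map dispatches to configuration_qwen3_shared_moe / modeling_qwen3_shared_moe,
# so executing the repo's custom code must be explicitly allowed.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    trust_remote_code=True,
    torch_dtype="bfloat16",  # matches the checkpoint's torch_dtype
    device_map="auto",       # shard across available devices (requires accelerate)
)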
configuration_qwen3_shared_moe.py ADDED
@@ -0,0 +1,230 @@
+ # coding=utf-8
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Qwen3SharedMoE model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class Qwen3SharedMoeConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`Qwen3SharedMoeModel`]. It is used to instantiate a
+     Qwen3SharedMoE model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of [Qwen/Qwen3-15B-A2B](https://huggingface.co/Qwen/Qwen3-15B-A2B).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the Qwen3SharedMoE model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`Qwen3SharedMoeModel`].
+         hidden_size (`int`, *optional*, defaults to 2048):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 6144):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 24):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 4):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `4`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
+             and you expect the model to work on longer `max_position_embeddings`, we recommend you update this value
+             accordingly.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to the value recommended by the implementation, using the
+                     `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `long_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         decoder_sparse_step (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer.
+         moe_intermediate_size (`int`, *optional*, defaults to 768):
+             Intermediate size of the routed expert.
+         shared_expert_intermediate_size (`int`, *optional*, defaults to `None`):
+             Intermediate size of the shared expert. `None` means no shared expert.
+         num_experts_per_tok (`int`, *optional*, defaults to 8):
+             Number of selected experts.
+         num_experts (`int`, *optional*, defaults to 128):
+             Number of routed experts.
+         norm_topk_prob (`bool`, *optional*, defaults to `False`):
+             Whether to normalize the topk probabilities.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+         mlp_only_layers (`List[int]`, *optional*, defaults to `[]`):
+             Indicates which layers use Qwen3SharedMoeMLP rather than Qwen3SharedMoeSparseMoeBlock. The list contains
+             layer indices from 0 to num_layers-1. If `mlp_only_layers` is empty, `decoder_sparse_step` is used to
+             determine the sparsity.
+     """
+
+     model_type = "qwen3_shared_moe"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     # Default tensor parallel plan for base model `Qwen3SharedMoe`
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=2048,
+         intermediate_size=6144,
+         num_hidden_layers=24,
+         num_attention_heads=32,
+         num_key_value_heads=4,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         decoder_sparse_step=1,
+         moe_intermediate_size=768,
+         shared_expert_intermediate_size=None,
+         num_experts_per_tok=8,
+         num_experts=128,
+         norm_topk_prob=False,
+         output_router_logits=False,
+         router_aux_loss_coef=0.001,
+         mlp_only_layers=None,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window if use_sliding_window else None
+         self.max_window_layers = max_window_layers
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, move it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         # MoE arguments
+         self.decoder_sparse_step = decoder_sparse_step
+         self.moe_intermediate_size = moe_intermediate_size
+         self.shared_expert_intermediate_size = shared_expert_intermediate_size
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_experts = num_experts
+         self.norm_topk_prob = norm_topk_prob
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
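
For reference, a minimal sketch (not part of the repo) instantiating this config with the values from config.json above. Note that head_dim is not an explicit parameter of __init__ and is absorbed through **kwargs by PretrainedConfig:

from configuration_qwen3_shared_moe import Qwen3SharedMoeConfig

config = Qwen3SharedMoeConfig(
    hidden_size=4096,
    intermediate_size=12288,
    num_hidden_layers=94,
    num_attention_heads=64,
    num_key_value_heads=4,
    head_dim=128,  # passed through **kwargs to PretrainedConfig
    max_position_embeddings=262144,
    rope_theta=5000000,
    moe_intermediate_size=1536,
    num_experts=128,
    num_experts_per_tok=8,
    norm_topk_prob=True,
    shared_expert_intermediate_size=None,  # None disables the shared expert
)
print(config.model_type)  # -> qwen3_shared_moe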
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.52.3"
+ }
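
The _from_model_config flag records that these generation defaults were derived from the model config rather than set explicitly. A minimal sketch of reading them back, with a placeholder repo id:

from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained(
    "user/qwen3-shared-moe-model"  # placeholder, not the real repo path
)
print(gen_config)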
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c9fd38a3545adf6c49487ba120794adc45d320959f7931e66b4a64dad0fe37b
+ size 4609541672
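
Each shard above is stored as a Git LFS pointer: three key/value lines (version, oid, size) stand in for the binary payload. A small sketch, assuming a local checkout where the pointer files are present, that parses them and tallies the declared checkpoint size:

from pathlib import Path

def parse_lfs_pointer(path: Path) -> dict:
    # Each line of an LFS pointer file is "<key> <value>".
    return dict(line.split(" ", 1) for line in path.read_text().splitlines() if line)

total_bytes = sum(
    int(parse_lfs_pointer(p)["size"])
    for p in sorted(Path(".").glob("model-*-of-00095.safetensors"))
)
print(f"{total_bytes / 1e9:.1f} GB declared across shards")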
model-00002-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:323e41e6b1995960be63750619816d9a553a98de631bf6656ba928bb7d1ea00d
+ size 4975511320
model-00003-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9ae9ea07de48767eca8bfff1b36a7a2844868e90200748e51818e64abff2b0c
+ size 4975511320
model-00004-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58de9d1170aab351ad1601ce569cfd2dd13ee9367bb85ff08857ac0a5d0deb0a
+ size 4975511320
model-00005-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb13acf2168ec811ccc903e3cfb1ae2f1fdc39ff3dbebfde62d903db1329822
+ size 4975511320
model-00006-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5733080e6e0d19b19a7eb793aee149f1101da93cf5034fd3a49d3ad80caa7ff3
+ size 4975511320
model-00007-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3595459dfb48ff9646b113521554dc4e536d82c63b4234fa613f49ce1cbb433
+ size 4975511320
model-00008-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ebe242f8bbb5286567fc3693cb04ad12387109ce0cca80ddb3f8b78771b046a
+ size 4975511320
model-00009-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd4bb2b679bc25e9d698e7650f2fe4dde9cde84392eaee96251b2f424f9c6aba
+ size 4975511320
model-00010-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3f25c5bcc94a95c3470486d62911e41a61d844aec29d4bb6260730c34673103
+ size 4975511320
model-00011-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2781eaebc6b5a64934b67ffb0288d7c0a7b8f1732b106e101ae3dc8456d8f5d
+ size 4975511336
model-00012-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f26f14baced85f0bb9fc0dcf18229c6940a6d9424df03706fab39b7e15aaf3d8
+ size 4975511328
model-00013-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:250d4ff1cbf7e4ae6ec3a0e15b2ea7c3b27c46aaa44cdf33d75c20ed56f57635
+ size 4975511328
model-00014-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a62cd354ed0ec3c09675623821e6cdcd5a80fff9743deebe5ecbadc9858415c5
+ size 4975511328
model-00015-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa7791cc6cf361c1637212ca979120582a65116ab5d4bf8945cb83f3918b241d
+ size 4975511328
model-00016-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb72ecdb9a29417ff03ee19867daf4cd795124b6e2f284a763098cd18dabe9b3
+ size 4975511328
model-00017-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ceb17aaebbcc02499e229a9f93192e0b656fd3e5e59db9497d413b83fbd0b98
+ size 4975511328
model-00018-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af4637b92ada14f33610de50a4d6529d8549df481392703b3d915e904dd2e90d
+ size 4975511328
model-00019-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f3ab6a61b52b88fe4547336fcf8b4d9bc293310bdade874cf5ab7f0bde1b0a1
+ size 4975511328
model-00020-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76454d81a1a5ec69e4f1a8dd4ebd9f911137dad1660879f9533c007386df922e
+ size 4975511328
model-00021-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8021572afcc2945354520ef60ba7642bdf40b54db106c51b273398e74c21cbd5
+ size 4975511328
model-00022-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f15e8f58b6b56c2ab4122d6d1c5937cc913fb65e08fb008bfc26ac92fc0fb564
+ size 4975511328
model-00023-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0ccce6c7f324c3a0ed210f58f8cf5670d7f607519a27f5b1070fc496d433c69
+ size 4975511328
model-00024-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ea2f12e4ea437d67037695356b90142a334c6acd5df21f6e2bff112794f8791
+ size 4975511328
model-00025-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e95c04541b074c26c18d81ee55bfc6dbe62e8121e9283e187f2578ae243ccd9e
+ size 4975511328
model-00026-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7144e36cb3e495a47d96370b6059946b9c4e6a8c20b2018955772b55b2a337a
+ size 4975511328
model-00027-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3906e8af289e0ddc84f32297ab214176f09caa6709437fc5ed588d29f420c67e
+ size 4975511328
model-00028-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ffd48bbe067b8eccb83d6a4b1232015337bf953d8fdeca713db8e4831a139dd
+ size 4975511328
model-00029-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7c0d30a39ce3af5ada2ca7c7cba88cdd7cd147b38a1b52f636130d29c8edde0
+ size 4975511328
model-00030-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54ae1399c68b2e4e8dea2b97fce64aa85d7122179547af72b1e7b61db5a88a53
+ size 4975511328
model-00031-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f1c413c32c1cf6017ca775906d7311e5e2e4f53c6e40dea15a8d6f9178e6db7
+ size 4975511328
model-00032-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e7057dd4181919c06bbca1c0b203e2cc448f6a15093584265cd0d308d98e131
+ size 4975511328
model-00033-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dae9ad195afcedaceb8cce509abd08ee004557aaf236312c53d388e2cd23ddd8
+ size 4975511328
model-00034-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bea005f37c86e62e5586432487b484c47715394490322fc8502a00e79653a77
+ size 4975511328
model-00035-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9443d308866d8cbb8f44bd5346d052b12e0d7f61db15c2d15ce8f5bad0ff1bf5
+ size 4975511328
model-00036-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f784257c868e02377ac0b030f4ea32de10883535e72262a252c39eb6677e1b9
+ size 4975511328
model-00037-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:919a64aa4344463523b9b3d38f588781af849faa4bddb0e839be6908965d5ddb
+ size 4975511328
model-00038-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d132952f4b47a9a921942f024f62e65998040d362656cc39f93d2d0f71214cc
+ size 4975511328
model-00039-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c157863e5bc6a39fcea0c6d79b3b87badda1a9c4f20e9a60395af6be173832c
+ size 4975511328
model-00040-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04d4a6242647684d6cb1c3aefeb688ef8eb84d643caa5d37fafcfcea45ca04b6
+ size 4975511328
model-00041-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c142dfdea9b973cf2720a0a40d667cf29922ccf27d796db84f7e65c9fa185ac
+ size 4975511328
model-00042-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45be3f97a40b21e2dd99745603416558517245442f585a55915f1d60ba86f980
+ size 4975511328
model-00043-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e76d65dd63c87c46be9e08e2df5662f2447defcb6d6e86a88e453f4b0a84ac3
+ size 4975511328
model-00044-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a68b6daa0faa3e022d69f068499a117d66433dd38a986a8d89070ef2a506812
+ size 4975511328
model-00045-of-00095.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75f72d75eeedf9b07753355286bb4e37d9197e95f00b0887410ad31d657630eb
+ size 4975511328