zerofata committed on
Commit
c7061bd
·
verified ·
1 Parent(s): 501d358

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. args.json +467 -0
  3. chat_template.jinja +103 -0
  4. config.json +43 -0
  5. generation_config.json +10 -0
  6. model-00001-of-00043.safetensors +3 -0
  7. model-00002-of-00043.safetensors +3 -0
  8. model-00003-of-00043.safetensors +3 -0
  9. model-00004-of-00043.safetensors +3 -0
  10. model-00005-of-00043.safetensors +3 -0
  11. model-00006-of-00043.safetensors +3 -0
  12. model-00007-of-00043.safetensors +3 -0
  13. model-00008-of-00043.safetensors +3 -0
  14. model-00009-of-00043.safetensors +3 -0
  15. model-00010-of-00043.safetensors +3 -0
  16. model-00011-of-00043.safetensors +3 -0
  17. model-00012-of-00043.safetensors +3 -0
  18. model-00013-of-00043.safetensors +3 -0
  19. model-00014-of-00043.safetensors +3 -0
  20. model-00015-of-00043.safetensors +3 -0
  21. model-00016-of-00043.safetensors +3 -0
  22. model-00017-of-00043.safetensors +3 -0
  23. model-00018-of-00043.safetensors +3 -0
  24. model-00019-of-00043.safetensors +3 -0
  25. model-00020-of-00043.safetensors +3 -0
  26. model-00021-of-00043.safetensors +3 -0
  27. model-00022-of-00043.safetensors +3 -0
  28. model-00023-of-00043.safetensors +3 -0
  29. model-00024-of-00043.safetensors +3 -0
  30. model-00025-of-00043.safetensors +3 -0
  31. model-00026-of-00043.safetensors +3 -0
  32. model-00027-of-00043.safetensors +3 -0
  33. model-00028-of-00043.safetensors +3 -0
  34. model-00029-of-00043.safetensors +3 -0
  35. model-00030-of-00043.safetensors +3 -0
  36. model-00031-of-00043.safetensors +3 -0
  37. model-00032-of-00043.safetensors +3 -0
  38. model-00033-of-00043.safetensors +3 -0
  39. model-00035-of-00043.safetensors +3 -0
  40. model-00036-of-00043.safetensors +3 -0
  41. model-00037-of-00043.safetensors +3 -0
  42. model-00038-of-00043.safetensors +3 -0
  43. model-00039-of-00043.safetensors +3 -0
  44. model-00040-of-00043.safetensors +3 -0
  45. model-00041-of-00043.safetensors +3 -0
  46. model-00042-of-00043.safetensors +3 -0
  47. model-00043-of-00043.safetensors +3 -0
  48. model.safetensors.index.json +0 -0
  49. special_tokens_map.json +40 -0
  50. tokenizer.json +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
args.json ADDED
@@ -0,0 +1,467 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "use_ray": false,
3
+ "ray_exp_name": null,
4
+ "device_groups": null,
5
+ "model": "zai-org/GLM-4.5-Air",
6
+ "model_type": "glm4_5",
7
+ "model_revision": null,
8
+ "task_type": "causal_lm",
9
+ "torch_dtype": "bfloat16",
10
+ "attn_impl": null,
11
+ "new_special_tokens": [],
12
+ "num_labels": null,
13
+ "problem_type": null,
14
+ "rope_scaling": null,
15
+ "device_map": null,
16
+ "max_memory": {},
17
+ "max_model_len": null,
18
+ "local_repo_path": null,
19
+ "init_strategy": null,
20
+ "template": "glm4_5",
21
+ "system": null,
22
+ "max_length": 10280,
23
+ "truncation_strategy": "delete",
24
+ "max_pixels": null,
25
+ "agent_template": null,
26
+ "norm_bbox": null,
27
+ "use_chat_template": true,
28
+ "padding_free": true,
29
+ "padding_side": "right",
30
+ "loss_scale": "default",
31
+ "sequence_parallel_size": 1,
32
+ "response_prefix": null,
33
+ "template_backend": "swift",
34
+ "dataset": [
35
+ "/workspace/joined_dataset_cleaned_modified.jsonl"
36
+ ],
37
+ "val_dataset": [],
38
+ "split_dataset_ratio": 0.01,
39
+ "data_seed": 42,
40
+ "dataset_num_proc": 8,
41
+ "load_from_cache_file": true,
42
+ "dataset_shuffle": true,
43
+ "val_dataset_shuffle": false,
44
+ "streaming": false,
45
+ "interleave_prob": null,
46
+ "stopping_strategy": "first_exhausted",
47
+ "shuffle_buffer_size": 1000,
48
+ "download_mode": "reuse_dataset_if_exists",
49
+ "columns": {},
50
+ "strict": false,
51
+ "remove_unused_columns": true,
52
+ "model_name": null,
53
+ "model_author": null,
54
+ "custom_dataset_info": [],
55
+ "quant_method": null,
56
+ "quant_bits": null,
57
+ "hqq_axis": null,
58
+ "bnb_4bit_compute_dtype": "bfloat16",
59
+ "bnb_4bit_quant_type": "nf4",
60
+ "bnb_4bit_use_double_quant": true,
61
+ "bnb_4bit_quant_storage": null,
62
+ "max_new_tokens": null,
63
+ "temperature": null,
64
+ "top_k": null,
65
+ "top_p": null,
66
+ "repetition_penalty": null,
67
+ "num_beams": 1,
68
+ "stream": false,
69
+ "stop_words": [],
70
+ "logprobs": false,
71
+ "top_logprobs": null,
72
+ "ckpt_dir": "/workspace/glm-4.5-air-mcore",
73
+ "lora_modules": [],
74
+ "tuner_backend": "peft",
75
+ "train_type": "lora",
76
+ "adapters": [],
77
+ "external_plugins": [],
78
+ "seed": 42,
79
+ "model_kwargs": {},
80
+ "load_args": false,
81
+ "load_data_args": false,
82
+ "packing": true,
83
+ "packing_length": 10280,
84
+ "lazy_tokenize": false,
85
+ "cached_dataset": [],
86
+ "custom_register_path": [],
87
+ "use_hf": false,
88
+ "hub_token": null,
89
+ "ddp_timeout": 18000000,
90
+ "ddp_backend": null,
91
+ "ignore_args_error": false,
92
+ "use_swift_lora": false,
93
+ "freeze_llm": false,
94
+ "freeze_vit": true,
95
+ "freeze_aligner": true,
96
+ "freeze_parameters": [],
97
+ "freeze_parameters_regex": null,
98
+ "freeze_parameters_ratio": 0.0,
99
+ "trainable_parameters": [],
100
+ "trainable_parameters_regex": null,
101
+ "adapter_load": null,
102
+ "target_modules": [
103
+ "all-linear"
104
+ ],
105
+ "target_regex": null,
106
+ "modules_to_save": [],
107
+ "lora_rank": 256,
108
+ "lora_alpha": 16,
109
+ "lora_dropout": 0.05,
110
+ "lora_bias": "none",
111
+ "lora_dtype": null,
112
+ "use_rslora": true,
113
+ "rlhf_type": null,
114
+ "ref_load": null,
115
+ "ref_adapter_load": null,
116
+ "beta": 0.1,
117
+ "rpo_alpha": null,
118
+ "reference_free": false,
119
+ "label_smoothing": 0.0,
120
+ "f_divergence_type": "reverse_kl",
121
+ "loss_type": null,
122
+ "desirable_weight": 1.0,
123
+ "undesirable_weight": 1.0,
124
+ "calculate_KL": null,
125
+ "center_rewards_coefficient": null,
126
+ "padded_vocab_size": 151552,
127
+ "initialize_embedding": false,
128
+ "mlp_padding_free": false,
129
+ "dataloader_persistent_workers": true,
130
+ "dataloader_prefetch_factor": 10,
131
+ "architectures": "Glm4MoeForCausalLM",
132
+ "llm_architectures": null,
133
+ "max_epochs": 2,
134
+ "enable_dft_loss": false,
135
+ "enable_channel_loss": false,
136
+ "original_max_position_embeddings": null,
137
+ "partial_rotary_factor": 0.5,
138
+ "use_shared_expert_gate": false,
139
+ "vit_gradient_checkpointing": true,
140
+ "gradient_checkpointing_kwargs": null,
141
+ "linear_num_value_heads": null,
142
+ "linear_num_key_heads": null,
143
+ "linear_key_head_dim": null,
144
+ "linear_value_head_dim": null,
145
+ "linear_conv_kernel_dim": null,
146
+ "layer_types": null,
147
+ "mrope_interleaved": false,
148
+ "micro_batch_size": 4,
149
+ "global_batch_size": 32,
150
+ "recompute_granularity": "full",
151
+ "recompute_method": "uniform",
152
+ "recompute_num_layers": 1,
153
+ "recompute_modules": [
154
+ "core_attn"
155
+ ],
156
+ "use_cpu_initialization": false,
157
+ "deterministic_mode": false,
158
+ "train_iters": null,
159
+ "log_interval": 5,
160
+ "tensorboard_dir": "/workspace/megatron_output/Iceblink-v3-SFT-1/v0-20251101-193922/runs",
161
+ "no_masked_softmax_fusion": false,
162
+ "no_bias_dropout_fusion": false,
163
+ "no_bias_swiglu_fusion": false,
164
+ "no_rope_fusion": false,
165
+ "no_gradient_accumulation_fusion": false,
166
+ "cross_entropy_loss_fusion": true,
167
+ "cross_entropy_fusion_impl": "native",
168
+ "calculate_per_token_loss": true,
169
+ "use_flash_attn": false,
170
+ "attention_backend": "flash",
171
+ "optimizer": "adam",
172
+ "optimizer_cpu_offload": false,
173
+ "optimizer_offload_fraction": 1.0,
174
+ "use_precision_aware_optimizer": false,
175
+ "main_grads_dtype": "fp32",
176
+ "main_params_dtype": "fp32",
177
+ "exp_avg_dtype": "fp32",
178
+ "exp_avg_sq_dtype": "fp32",
179
+ "dataloader_type": "cyclic",
180
+ "manual_gc": false,
181
+ "manual_gc_interval": 0,
182
+ "lr": 1e-05,
183
+ "lr_decay_style": "cosine",
184
+ "lr_decay_iters": null,
185
+ "lr_warmup_iters": 0,
186
+ "lr_warmup_fraction": 0.05,
187
+ "min_lr": 1e-06,
188
+ "weight_decay": 0.1,
189
+ "clip_grad": 1.0,
190
+ "adam_beta1": 0.9,
191
+ "adam_beta2": 0.95,
192
+ "adam_eps": 1e-08,
193
+ "sgd_momentum": 0.9,
194
+ "save": "/workspace/megatron_output/Iceblink-v3-SFT-1/v0-20251101-193922",
195
+ "save_interval": 25,
196
+ "save_retain_interval": null,
197
+ "no_save_optim": true,
198
+ "no_save_rng": true,
199
+ "load": "/workspace/glm-4.5-air-mcore",
200
+ "no_load_optim": false,
201
+ "no_load_rng": false,
202
+ "finetune": true,
203
+ "ckpt_format": "torch_dist",
204
+ "no_initialization": true,
205
+ "auto_detect_ckpt_format": true,
206
+ "exit_on_missing_checkpoint": true,
207
+ "async_save": false,
208
+ "use_persistent_ckpt_worker": false,
209
+ "ckpt_fully_parallel_load": false,
210
+ "ckpt_assume_constant_structure": false,
211
+ "distributed_backend": "nccl",
212
+ "local_rank": 0,
213
+ "use_distributed_optimizer": true,
214
+ "tensor_model_parallel_size": 8,
215
+ "pipeline_model_parallel_size": 1,
216
+ "decoder_first_pipeline_num_layers": null,
217
+ "decoder_last_pipeline_num_layers": null,
218
+ "sequence_parallel": true,
219
+ "context_parallel_size": 1,
220
+ "tp_comm_overlap": false,
221
+ "overlap_grad_reduce": false,
222
+ "overlap_param_gather": false,
223
+ "distributed_timeout_minutes": 300000,
224
+ "num_layers_per_virtual_pipeline_stage": null,
225
+ "num_virtual_stages_per_pipeline_rank": null,
226
+ "microbatch_group_size_per_virtual_pipeline_stage": null,
227
+ "pipeline_model_parallel_layout": null,
228
+ "num_layers": 46,
229
+ "hidden_size": 4096,
230
+ "ffn_hidden_size": 10944,
231
+ "num_attention_heads": 96,
232
+ "group_query_attention": true,
233
+ "num_query_groups": 8,
234
+ "max_position_embeddings": 131072,
235
+ "position_embedding_type": "rope",
236
+ "mrope_section": null,
237
+ "rotary_base": 1000000,
238
+ "rotary_percent": 1.0,
239
+ "rotary_interleaved": false,
240
+ "normalization": "RMSNorm",
241
+ "norm_epsilon": 1e-05,
242
+ "swiglu": true,
243
+ "untie_embeddings_and_output_weights": true,
244
+ "disable_bias_linear": true,
245
+ "add_qkv_bias": true,
246
+ "attention_dropout": 0.0,
247
+ "hidden_dropout": 0.0,
248
+ "kv_channels": 128,
249
+ "qk_layernorm": false,
250
+ "transformer_impl": "transformer_engine",
251
+ "num_experts": 128,
252
+ "moe_layer_freq": "[0]*1+[1]*45",
253
+ "moe_ffn_hidden_size": 1408,
254
+ "moe_shared_expert_intermediate_size": 1408,
255
+ "moe_router_topk": 8,
256
+ "moe_router_pre_softmax": false,
257
+ "moe_router_dtype": "fp32",
258
+ "moe_router_score_function": "sigmoid",
259
+ "moe_router_bias_update_rate": 0.001,
260
+ "moe_router_enable_expert_bias": true,
261
+ "moe_router_topk_scaling_factor": 1.0,
262
+ "moe_router_load_balancing_type": "aux_loss",
263
+ "expert_model_parallel_size": 8,
264
+ "expert_tensor_parallel_size": 1,
265
+ "moe_token_dispatcher_type": null,
266
+ "moe_enable_deepep": false,
267
+ "moe_grouped_gemm": true,
268
+ "moe_permute_fusion": true,
269
+ "moe_aux_loss_coeff": 0.0005,
270
+ "moe_z_loss_coeff": null,
271
+ "moe_shared_expert_overlap": true,
272
+ "moe_layer_recompute": false,
273
+ "moe_expert_capacity_factor": null,
274
+ "moe_pad_expert_input_to_capacity": false,
275
+ "moe_token_drop_policy": null,
276
+ "multi_latent_attention": false,
277
+ "q_lora_rank": null,
278
+ "kv_lora_rank": 32,
279
+ "qk_head_dim": 128,
280
+ "qk_pos_emb_head_dim": 64,
281
+ "fp8_format": null,
282
+ "fp8_recipe": "delayed",
283
+ "fp8_amax_history_len": 1024,
284
+ "fp8_amax_compute_algo": "max",
285
+ "fp8_param_gather": false,
286
+ "fp16": false,
287
+ "bf16": true,
288
+ "apply_query_key_layer_scaling": false,
289
+ "attention_softmax_in_fp32": true,
290
+ "log_params_norm": false,
291
+ "log_throughput": false,
292
+ "tensorboard_log_interval": 1,
293
+ "tensorboard_queue_size": 50,
294
+ "log_timers_to_tensorboard": true,
295
+ "no_log_learning_rate_to_tensorboard": false,
296
+ "log_validation_ppl_to_tensorboard": true,
297
+ "log_memory_to_tensorboard": true,
298
+ "logging_level": null,
299
+ "wandb_project": "Megatron-Air-SFT",
300
+ "wandb_exp_name": "Iceblink-v3-SFT-1",
301
+ "wandb_save_dir": null,
302
+ "eval_iters": -1,
303
+ "eval_interval": 20,
304
+ "seq_length": 10280,
305
+ "num_workers": 8,
306
+ "megatron_extra_kwargs": {},
307
+ "add_version": true,
308
+ "rank": 0,
309
+ "global_world_size": 8,
310
+ "local_world_size": 8,
311
+ "model_suffix": "GLM-4.5-Air",
312
+ "model_info": "ModelInfo(model_type='glm4_5', model_dir='/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=True, config=None, task_type='causal_lm', num_labels=None)",
313
+ "model_meta": "ModelMeta(model_type='glm4_5', model_groups=[ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.5-Air-Base', hf_model_id='zai-org/GLM-4.5-Air-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air', hf_model_id='zai-org/GLM-4.5-Air', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air-FP8', hf_model_id='zai-org/GLM-4.5-Air-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Base', hf_model_id='zai-org/GLM-4.5-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5', hf_model_id='zai-org/GLM-4.5', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-FP8', hf_model_id='zai-org/GLM-4.5-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.6', hf_model_id='zai-org/GLM-4.6', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='glm4_5', get_function=<function get_model_tokenizer_with_flash_attn at 0x718e1047a7a0>, model_arch=None, architectures=['Glm4MoeForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.54'], tags=[])",
314
+ "model_dir": "/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air",
315
+ "hub": "<class 'swift.hub.hub.MSHub'>",
316
+ "megatron_model_meta": "MegatronModelMeta(megatron_model_type='gpt', model_types=['qwen2', 'qwen2_5', 'qwq', 'qwq_preview', 'qwen2_5_math', 'llama', 'llama3', 'llama3_1', 'llama3_2', 'longwriter_llama3_1', 'codefuse_codellama', 'marco_o1', 'deepseek', 'deepseek_r1_distill', 'yi', 'yi_coder', 'sus', 'skywork_o1', 'openbuddy_llama', 'openbuddy_llama3', 'megrez', 'reflection', 'numina', 'ziya', 'mengzi3', 'qwen3', 'qwen3_thinking', 'qwen3_nothinking', 'qwen2_moe', 'qwen3_moe', 'qwen3_moe_thinking', 'qwen3_coder', 'internlm3', 'mimo', 'mimo_rl', 'moonlight', 'deepseek_moe', 'deepseek_v2', 'deepseek_v2_5', 'deepseek_r1', 'dots1', 'ernie', 'glm4_5', 'deepseek_v3_1', 'ernie_thinking'], convert_mcore2hf=<function convert_mcore2hf at 0x718d83275e40>, convert_hf2mcore=<function convert_hf2mcore at 0x718d83275a80>, model_cls=<class 'swift.megatron.model.gpt_model.GPTModel'>, convert_hf_config=<function convert_gpt_hf_config at 0x718d83245940>, get_transformer_layer_spec=None, model_provider=<function model_provider at 0x718d832179c0>, visual_cls=None, extra_args_provider=None)",
317
+ "extra_args": {
318
+ "use_ray": false,
319
+ "ray_exp_name": null,
320
+ "device_groups": null,
321
+ "model": "ZhipuAI/GLM-4.5-Air",
322
+ "model_type": "glm4_5",
323
+ "model_revision": null,
324
+ "task_type": "causal_lm",
325
+ "torch_dtype": "bfloat16",
326
+ "attn_impl": null,
327
+ "new_special_tokens": [],
328
+ "num_labels": null,
329
+ "problem_type": null,
330
+ "rope_scaling": null,
331
+ "device_map": null,
332
+ "max_memory": {},
333
+ "max_model_len": null,
334
+ "local_repo_path": null,
335
+ "init_strategy": null,
336
+ "template": "glm4_5",
337
+ "system": null,
338
+ "max_length": 10280,
339
+ "truncation_strategy": "delete",
340
+ "max_pixels": null,
341
+ "agent_template": null,
342
+ "norm_bbox": null,
343
+ "use_chat_template": true,
344
+ "padding_free": true,
345
+ "padding_side": "right",
346
+ "sequence_parallel_size": 1,
347
+ "response_prefix": null,
348
+ "template_backend": "swift",
349
+ "dataset": [
350
+ "/workspace/joined_dataset_cleaned_modified.jsonl"
351
+ ],
352
+ "val_dataset": [],
353
+ "split_dataset_ratio": 0.01,
354
+ "data_seed": 42,
355
+ "dataset_num_proc": 8,
356
+ "load_from_cache_file": true,
357
+ "dataset_shuffle": true,
358
+ "val_dataset_shuffle": false,
359
+ "streaming": false,
360
+ "interleave_prob": null,
361
+ "stopping_strategy": "first_exhausted",
362
+ "shuffle_buffer_size": 1000,
363
+ "download_mode": "reuse_dataset_if_exists",
364
+ "columns": {},
365
+ "strict": false,
366
+ "remove_unused_columns": true,
367
+ "model_name": null,
368
+ "model_author": null,
369
+ "custom_dataset_info": [],
370
+ "quant_method": null,
371
+ "quant_bits": null,
372
+ "hqq_axis": null,
373
+ "bnb_4bit_compute_dtype": "bfloat16",
374
+ "bnb_4bit_quant_type": "nf4",
375
+ "bnb_4bit_use_double_quant": true,
376
+ "bnb_4bit_quant_storage": null,
377
+ "max_new_tokens": null,
378
+ "temperature": null,
379
+ "top_k": null,
380
+ "top_p": null,
381
+ "repetition_penalty": null,
382
+ "num_beams": 1,
383
+ "stream": false,
384
+ "stop_words": [],
385
+ "logprobs": false,
386
+ "top_logprobs": null,
387
+ "ckpt_dir": "/workspace/glm-4.5-air-mcore",
388
+ "lora_modules": [],
389
+ "tuner_backend": "peft",
390
+ "train_type": "lora",
391
+ "adapters": [],
392
+ "external_plugins": [],
393
+ "model_kwargs": {},
394
+ "load_args": false,
395
+ "load_data_args": false,
396
+ "packing": true,
397
+ "packing_length": 10280,
398
+ "lazy_tokenize": false,
399
+ "cached_dataset": [],
400
+ "custom_register_path": [],
401
+ "use_hf": false,
402
+ "hub_token": null,
403
+ "ddp_timeout": 18000000,
404
+ "ddp_backend": null,
405
+ "ignore_args_error": false,
406
+ "use_swift_lora": false,
407
+ "freeze_llm": false,
408
+ "freeze_vit": true,
409
+ "freeze_aligner": true,
410
+ "freeze_parameters": [],
411
+ "freeze_parameters_regex": null,
412
+ "freeze_parameters_ratio": 0.0,
413
+ "trainable_parameters": [],
414
+ "trainable_parameters_regex": null,
415
+ "adapter_load": null,
416
+ "target_modules": [
417
+ "all-linear"
418
+ ],
419
+ "target_regex": null,
420
+ "modules_to_save": [],
421
+ "lora_rank": 256,
422
+ "lora_alpha": 16,
423
+ "lora_dropout": 0.05,
424
+ "lora_bias": "none",
425
+ "lora_dtype": null,
426
+ "use_rslora": true,
427
+ "rlhf_type": null,
428
+ "ref_load": null,
429
+ "ref_adapter_load": null,
430
+ "beta": 0.1,
431
+ "rpo_alpha": null,
432
+ "reference_free": false,
433
+ "label_smoothing": 0.0,
434
+ "f_divergence_type": "reverse_kl",
435
+ "loss_type": null,
436
+ "desirable_weight": 1.0,
437
+ "undesirable_weight": 1.0,
438
+ "calculate_KL": null,
439
+ "center_rewards_coefficient": null,
440
+ "padded_vocab_size": 151552,
441
+ "initialize_embedding": false,
442
+ "mlp_padding_free": false,
443
+ "dataloader_persistent_workers": true,
444
+ "dataloader_prefetch_factor": 10,
445
+ "architectures": "Glm4MoeForCausalLM",
446
+ "llm_architectures": null,
447
+ "max_epochs": 2,
448
+ "enable_dft_loss": false,
449
+ "enable_channel_loss": false,
450
+ "original_max_position_embeddings": null,
451
+ "partial_rotary_factor": 0.5,
452
+ "use_shared_expert_gate": false,
453
+ "vit_gradient_checkpointing": true,
454
+ "gradient_checkpointing_kwargs": null,
455
+ "linear_num_value_heads": null,
456
+ "linear_num_key_heads": null,
457
+ "linear_key_head_dim": null,
458
+ "linear_value_head_dim": null,
459
+ "linear_conv_kernel_dim": null,
460
+ "layer_types": null,
461
+ "mrope_interleaved": false,
462
+ "add_version": true,
463
+ "model_info": "ModelInfo(model_type='glm4_5', model_dir='/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=True, config=None, task_type='causal_lm', num_labels=None)",
464
+ "model_meta": "ModelMeta(model_type='glm4_5', model_groups=[ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.5-Air-Base', hf_model_id='zai-org/GLM-4.5-Air-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air', hf_model_id='zai-org/GLM-4.5-Air', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air-FP8', hf_model_id='zai-org/GLM-4.5-Air-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Base', hf_model_id='zai-org/GLM-4.5-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5', hf_model_id='zai-org/GLM-4.5', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-FP8', hf_model_id='zai-org/GLM-4.5-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.6', hf_model_id='zai-org/GLM-4.6', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='glm4_5', get_function=<function get_model_tokenizer_with_flash_attn at 0x718e1047a7a0>, model_arch=None, architectures=['Glm4MoeForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.54'], tags=[])",
465
+ "megatron_model_meta": "MegatronModelMeta(megatron_model_type='gpt', model_types=['qwen2', 'qwen2_5', 'qwq', 'qwq_preview', 'qwen2_5_math', 'llama', 'llama3', 'llama3_1', 'llama3_2', 'longwriter_llama3_1', 'codefuse_codellama', 'marco_o1', 'deepseek', 'deepseek_r1_distill', 'yi', 'yi_coder', 'sus', 'skywork_o1', 'openbuddy_llama', 'openbuddy_llama3', 'megrez', 'reflection', 'numina', 'ziya', 'mengzi3', 'qwen3', 'qwen3_thinking', 'qwen3_nothinking', 'qwen2_moe', 'qwen3_moe', 'qwen3_moe_thinking', 'qwen3_coder', 'internlm3', 'mimo', 'mimo_rl', 'moonlight', 'deepseek_moe', 'deepseek_v2', 'deepseek_v2_5', 'deepseek_r1', 'dots1', 'ernie', 'glm4_5', 'deepseek_v3_1', 'ernie_thinking'], convert_mcore2hf=<function convert_mcore2hf at 0x718d83275e40>, convert_hf2mcore=<function convert_hf2mcore at 0x718d83275a80>, model_cls=<class 'swift.megatron.model.gpt_model.GPTModel'>, convert_hf_config=<function convert_gpt_hf_config at 0x718d83245940>, get_transformer_layer_spec=None, model_provider=<function model_provider at 0x718d832179c0>, visual_cls=None, extra_args_provider=None)"
466
+ }
467
+ }
chat_template.jinja ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [gMASK]<sop>
2
+ {%- if tools -%}
3
+ <|system|>
4
+ # Tools
5
+
6
+ You may call one or more functions to assist with the user query.
7
+
8
+ You are provided with function signatures within <tools></tools> XML tags:
9
+ <tools>
10
+ {% for tool in tools %}
11
+ {{ tool | tojson(ensure_ascii=False) }}
12
+ {% endfor %}
13
+ </tools>
14
+
15
+ For each function call, output the function name and arguments within the following XML format:
16
+ <tool_call>{function-name}
17
+ <arg_key>{arg-key-1}</arg_key>
18
+ <arg_value>{arg-value-1}</arg_value>
19
+ <arg_key>{arg-key-2}</arg_key>
20
+ <arg_value>{arg-value-2}</arg_value>
21
+ ...
22
+ </tool_call>{%- endif -%}
23
+ {%- macro visible_text(content) -%}
24
+ {%- if content is string -%}
25
+ {{- content }}
26
+ {%- elif content is iterable and content is not mapping -%}
27
+ {%- for item in content -%}
28
+ {%- if item is mapping and item.type == 'text' -%}
29
+ {{- item.text }}
30
+ {%- elif item is string -%}
31
+ {{- item }}
32
+ {%- endif -%}
33
+ {%- endfor -%}
34
+ {%- else -%}
35
+ {{- content }}
36
+ {%- endif -%}
37
+ {%- endmacro -%}
38
+ {%- set ns = namespace(last_user_index=-1) %}
39
+ {%- for m in messages %}
40
+ {%- if m.role == 'user' %}
41
+ {% set ns.last_user_index = loop.index0 -%}
42
+ {%- endif %}
43
+ {%- endfor %}
44
+ {% for m in messages %}
45
+ {%- if m.role == 'user' -%}<|user|>
46
+ {{ visible_text(m.content) }}
47
+ {{- '/nothink' if (enable_thinking is defined and not enable_thinking and not visible_text(m.content).endswith("/nothink")) else '' -}}
48
+ {%- elif m.role == 'assistant' -%}
49
+ <|assistant|>
50
+ {%- set reasoning_content = '' %}
51
+ {%- set content = visible_text(m.content) %}
52
+ {%- if m.reasoning_content is string %}
53
+ {%- set reasoning_content = m.reasoning_content %}
54
+ {%- else %}
55
+ {%- if '</think>' in content %}
56
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
57
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
58
+ {%- endif %}
59
+ {%- endif %}
60
+ {%- if loop.index0 > ns.last_user_index and reasoning_content -%}
61
+ {{ '\n<think>' + reasoning_content.strip() + '</think>'}}
62
+ {%- else -%}
63
+ {{ '\n<think></think>' }}
64
+ {%- endif -%}
65
+ {%- if content.strip() -%}
66
+ {{ '\n' + content.strip() }}
67
+ {%- endif -%}
68
+ {% if m.tool_calls %}
69
+ {% for tc in m.tool_calls %}
70
+ {%- if tc.function %}
71
+ {%- set tc = tc.function %}
72
+ {%- endif %}
73
+ {{ '\n<tool_call>' + tc.name }}
74
+ {% set _args = tc.arguments %}
75
+ {% for k, v in _args.items() %}
76
+ <arg_key>{{ k }}</arg_key>
77
+ <arg_value>{{ v | tojson(ensure_ascii=False) if v is not string else v }}</arg_value>
78
+ {% endfor %}
79
+ </tool_call>{% endfor %}
80
+ {% endif %}
81
+ {%- elif m.role == 'tool' -%}
82
+ {%- if m.content is string -%}
83
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
84
+ {{- '<|observation|>' }}
85
+ {%- endif %}
86
+ {{- '\n<tool_response>\n' }}
87
+ {{- m.content }}
88
+ {{- '\n</tool_response>' }}
89
+ {%- else -%}
90
+ <|observation|>{% for tr in m.content %}
91
+
92
+ <tool_response>
93
+ {{ tr.output if tr.output is defined else tr }}
94
+ </tool_response>{% endfor -%}
95
+ {% endif -%}
96
+ {%- elif m.role == 'system' -%}
97
+ <|system|>
98
+ {{ visible_text(m.content) }}
99
+ {%- endif -%}
100
+ {%- endfor -%}
101
+ {%- if add_generation_prompt -%}
102
+ <|assistant|>{{- '\n<think></think>' if (enable_thinking is defined and not enable_thinking) else '' -}}
103
+ {%- endif -%}
config.json ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Glm4MoeForCausalLM"
4
+ ],
5
+ "attention_bias": true,
6
+ "attention_dropout": 0.0,
7
+ "dtype": "bfloat16",
8
+ "eos_token_id": [
9
+ 151329,
10
+ 151336,
11
+ 151338
12
+ ],
13
+ "first_k_dense_replace": 1,
14
+ "head_dim": 128,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 4096,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 10944,
19
+ "max_position_embeddings": 131072,
20
+ "model_type": "glm4_moe",
21
+ "moe_intermediate_size": 1408,
22
+ "n_group": 1,
23
+ "n_routed_experts": 128,
24
+ "n_shared_experts": 1,
25
+ "norm_topk_prob": true,
26
+ "num_attention_heads": 96,
27
+ "num_experts_per_tok": 8,
28
+ "num_hidden_layers": 46,
29
+ "num_key_value_heads": 8,
30
+ "num_nextn_predict_layers": 1,
31
+ "pad_token_id": 151329,
32
+ "partial_rotary_factor": 0.5,
33
+ "rms_norm_eps": 1e-05,
34
+ "rope_scaling": null,
35
+ "rope_theta": 1000000,
36
+ "routed_scaling_factor": 1.0,
37
+ "tie_word_embeddings": false,
38
+ "topk_group": 1,
39
+ "transformers_version": "4.57.1",
40
+ "use_cache": true,
41
+ "use_qk_norm": false,
42
+ "vocab_size": 151552
43
+ }
generation_config.json ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "eos_token_id": [
4
+ 151329,
5
+ 151336,
6
+ 151338
7
+ ],
8
+ "pad_token_id": 151329,
9
+ "transformers_version": "4.57.1"
10
+ }
model-00001-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37639ec42fc97943ccf32767591cae3dc91e08acf7345fd34c40d8ec5118c879
3
+ size 4991854888
model-00002-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e1939bfea34981074cb751a23833688193a692124ff4791b11f11cd16048e8e
3
+ size 4994465672
model-00003-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:306adc29236fec4e3fd8faf76adb83186753bab68e590e4bcecbd0f5e138401b
3
+ size 4994465696
model-00004-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c122f3159e1cdd3d6f9d8ac038b27fcbf1ff7cd27d58cb6ca16664e42e60737a
3
+ size 4994465696
model-00005-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7bd72817a2584a1befd52e547654c18c15dd541d4611cebe0bb3e1b128a04acc
3
+ size 4994465696
model-00006-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad4e5941cfeeba9ca6d576675be5be2d2b3ba0bff5df1fda94ff4a1e629fe9cb
3
+ size 4974587952
model-00007-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fae46b20e23147bac48c74099e7d8e7ffcf088f763961cfa1bd0c86d532bc299
3
+ size 4991318928
model-00008-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e6480fc83a8e8de6ed9864c7f5ff6082d00c9ed370ad438ad2dee96ec4978a2
3
+ size 4994465664
model-00009-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b69c78a10644794b5acea498179409aa7884ac0df573c3222d9e7d1d2ab1a90f
3
+ size 4994465784
model-00010-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:240b5d748bbd524acc23fadc1bd0abfe7a0a7ade9159d0fe4d74e54ca3ebe5a3
3
+ size 4994466104
model-00011-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:348bd51624d1c015ba118fd6428d916b9eb37a2a661ad68c9541995c0191a8bc
3
+ size 4994466104
model-00012-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b133d39a2f249c53ae7c0cd92d745874fa012896c9233a5eb03378e32440229
3
+ size 4994466104
model-00013-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1067ec336c66450a781d02d27cd266fb4522bfd7f3caae54d556b5e76415bbec
3
+ size 4994466104
model-00014-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ae01e88f3f47c16356157499e051210ea26ae5bfb94554e0ec7375cf6369558
3
+ size 4994466104
model-00015-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb240a8c63c072cdbe9e0d0fe4bb21dd5bd69097e2f5d533bb296f802de4a5c8
3
+ size 4994466104
model-00016-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae8f780a4e98896a5cdec39476173faa9146850801d175a74025aedd76a8bba2
3
+ size 4994466104
model-00017-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c33ac6b45306e75dc3dd41014517343a3a6d1fd9aa1ac17dc85776d23650b9e
3
+ size 4994466104
model-00018-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae1effe91c6c53ef0e7b30b4e6bd00bce3dabcb770ae905cefbbef636a7390c8
3
+ size 4994466112
model-00019-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99d3aab8293ef8e98eeeed7f4d418dd233b4aa5f4d6c570be3959068dd7f11d5
3
+ size 4994466120
model-00020-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:819de2744c126a9472a239dfdd97fceb67e3fd554c6a1ced85bb075015ba2e70
3
+ size 4994466120
model-00021-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1da88df140bf43acc805e403aa48041d173fa339cc1a827fae60b66d6e9e715
3
+ size 4992409992
model-00022-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e7ae936a92c97415b76fbf9c343834055311065c946f6adc097a24efb21cd53
3
+ size 4996566656
model-00023-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4af23f53b359da50cadecf2615fbba2db6c7a9e5f17f4b42504d5b48978ff36
3
+ size 4994466088
model-00024-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e600022626722fc1a27d2b1c7e2abe44011210f5740470f2b480b1852f4f75f8
3
+ size 4994466104
model-00025-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e15b5cf247594f7beee01c7b76e76159034c2c16151d65b6d83b729c1219cb8
3
+ size 4994466104
model-00026-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7950789e57ba1c211b76e2eb2f7f9d504c4ebd02422311a3e66c929a5c5d4636
3
+ size 4994466104
model-00027-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd9afe43c15516d09c99813fdfc1fd71dc9f5ad83ff458f7ca9bf4c7949f577c
3
+ size 4994466104
model-00028-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79c966254ec6db47d72fd0410f712330c4167f03846d0298fb3124d68fc474ef
3
+ size 4994466104
model-00029-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5939de91bff11a2deed12e68eb489198ba10cb6fd6566874bf997a1edfb54701
3
+ size 4994466104
model-00030-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55613837eb1a3d20ca416944023a6b6f1760d97ae053c876cf223d32238fadbd
3
+ size 4994466104
model-00031-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b371a2c51ee9e62b6a2fd29c50d41c5dc29a5d078c32406d86e6db4e8eb16d7
3
+ size 4994466104
model-00032-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:457e4928c935d2546a7bc209dc92c696f648b700d836381279f5fe44a78d81f3
3
+ size 4994466104
model-00033-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9532ddc08b2c89c4bccbe0c2001bf1da622004841460d919002622ffaa9d48a
3
+ size 4994466112
model-00035-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f540c11eeee253a19dd68faed3cbd369e3be606597737a638cc39b28a4949a63
3
+ size 4994466120
model-00036-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:856683d83c4e7a5ab8f5ce2fe70596d56f40db5b29076e879fcd755355954d05
3
+ size 4903256352
model-00037-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a15089c9135ecd10ec2416a338960e8bdfae017bfe53d7636bedd9c5cdd2c3a
3
+ size 4993444616
model-00038-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e3599aa802a524d2cd1feb3dcb5ab44804770c883aac764c61331acc5247c69
3
+ size 4994466080
model-00039-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b05e5a3f5c334fe4f2f9802cd858c8825ad2ca41635e44cd388bc9420f99e15
3
+ size 4994466104
model-00040-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4aeb2d838cb73505772f4b13f319b405e56d26fdc0478f8fa75b7fceb18b779a
3
+ size 4994466104
model-00041-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7aa75cf6c629e660807fc554384f5ee0af2c022b50765e7d1d6fc920f1c6fd49
3
+ size 4994466104
model-00042-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d0b67b4c40bd6a3cfde6c9f95ad0ab516cdf722927c82f6f585816a7fadf279
3
+ size 4994466104
model-00043-of-00043.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7aeb6d063560105a06d50e8629632d6cad53e88280112b82e9909243c2863a5
3
+ size 4056996976
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "[MASK]",
5
+ "[gMASK]",
6
+ "[sMASK]",
7
+ "<sop>",
8
+ "<eop>",
9
+ "<|system|>",
10
+ "<|user|>",
11
+ "<|assistant|>",
12
+ "<|observation|>",
13
+ "<|begin_of_image|>",
14
+ "<|end_of_image|>",
15
+ "<|begin_of_video|>",
16
+ "<|end_of_video|>",
17
+ "<|begin_of_audio|>",
18
+ "<|end_of_audio|>",
19
+ "<|begin_of_transcription|>",
20
+ "<|end_of_transcription|>",
21
+ "<|code_prefix|>",
22
+ "<|code_middle|>",
23
+ "<|code_suffix|>",
24
+ "/nothink"
25
+ ],
26
+ "eos_token": {
27
+ "content": "<|endoftext|>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "pad_token": {
34
+ "content": "<|endoftext|>",
35
+ "lstrip": false,
36
+ "normalized": false,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ }
40
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bda8e2146c3bb7b7e0fc96dcc4f0aeff041c6c27952e3ace0665663ebff346ba
3
+ size 19970700