{ "use_ray": false, "ray_exp_name": null, "device_groups": null, "model": "zai-org/GLM-4.5-Air", "model_type": "glm4_5", "model_revision": null, "task_type": "causal_lm", "torch_dtype": "bfloat16", "attn_impl": null, "new_special_tokens": [], "num_labels": null, "problem_type": null, "rope_scaling": null, "device_map": null, "max_memory": {}, "max_model_len": null, "local_repo_path": null, "init_strategy": null, "template": "glm4_5", "system": null, "max_length": 10280, "truncation_strategy": "delete", "max_pixels": null, "agent_template": null, "norm_bbox": null, "use_chat_template": true, "padding_free": true, "padding_side": "right", "loss_scale": "default", "sequence_parallel_size": 1, "response_prefix": null, "template_backend": "swift", "dataset": [ "/workspace/joined_dataset_cleaned_modified.jsonl" ], "val_dataset": [], "split_dataset_ratio": 0.01, "data_seed": 42, "dataset_num_proc": 8, "load_from_cache_file": true, "dataset_shuffle": true, "val_dataset_shuffle": false, "streaming": false, "interleave_prob": null, "stopping_strategy": "first_exhausted", "shuffle_buffer_size": 1000, "download_mode": "reuse_dataset_if_exists", "columns": {}, "strict": false, "remove_unused_columns": true, "model_name": null, "model_author": null, "custom_dataset_info": [], "quant_method": null, "quant_bits": null, "hqq_axis": null, "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true, "bnb_4bit_quant_storage": null, "max_new_tokens": null, "temperature": null, "top_k": null, "top_p": null, "repetition_penalty": null, "num_beams": 1, "stream": false, "stop_words": [], "logprobs": false, "top_logprobs": null, "ckpt_dir": "/workspace/glm-4.5-air-mcore", "lora_modules": [], "tuner_backend": "peft", "train_type": "lora", "adapters": [], "external_plugins": [], "seed": 42, "model_kwargs": {}, "load_args": false, "load_data_args": false, "packing": true, "packing_length": 10280, "lazy_tokenize": false, "cached_dataset": [], "custom_register_path": [], "use_hf": false, "hub_token": null, "ddp_timeout": 18000000, "ddp_backend": null, "ignore_args_error": false, "use_swift_lora": false, "freeze_llm": false, "freeze_vit": true, "freeze_aligner": true, "freeze_parameters": [], "freeze_parameters_regex": null, "freeze_parameters_ratio": 0.0, "trainable_parameters": [], "trainable_parameters_regex": null, "adapter_load": null, "target_modules": [ "all-linear" ], "target_regex": null, "modules_to_save": [], "lora_rank": 256, "lora_alpha": 16, "lora_dropout": 0.05, "lora_bias": "none", "lora_dtype": null, "use_rslora": true, "rlhf_type": null, "ref_load": null, "ref_adapter_load": null, "beta": 0.1, "rpo_alpha": null, "reference_free": false, "label_smoothing": 0.0, "f_divergence_type": "reverse_kl", "loss_type": null, "desirable_weight": 1.0, "undesirable_weight": 1.0, "calculate_KL": null, "center_rewards_coefficient": null, "padded_vocab_size": 151552, "initialize_embedding": false, "mlp_padding_free": false, "dataloader_persistent_workers": true, "dataloader_prefetch_factor": 10, "architectures": "Glm4MoeForCausalLM", "llm_architectures": null, "max_epochs": 2, "enable_dft_loss": false, "enable_channel_loss": false, "original_max_position_embeddings": null, "partial_rotary_factor": 0.5, "use_shared_expert_gate": false, "vit_gradient_checkpointing": true, "gradient_checkpointing_kwargs": null, "linear_num_value_heads": null, "linear_num_key_heads": null, "linear_key_head_dim": null, "linear_value_head_dim": null, "linear_conv_kernel_dim": null, "layer_types": null, 
"mrope_interleaved": false, "micro_batch_size": 4, "global_batch_size": 32, "recompute_granularity": "full", "recompute_method": "uniform", "recompute_num_layers": 1, "recompute_modules": [ "core_attn" ], "use_cpu_initialization": false, "deterministic_mode": false, "train_iters": null, "log_interval": 5, "tensorboard_dir": "/workspace/megatron_output/Iceblink-v3-SFT-1/v0-20251101-193922/runs", "no_masked_softmax_fusion": false, "no_bias_dropout_fusion": false, "no_bias_swiglu_fusion": false, "no_rope_fusion": false, "no_gradient_accumulation_fusion": false, "cross_entropy_loss_fusion": true, "cross_entropy_fusion_impl": "native", "calculate_per_token_loss": true, "use_flash_attn": false, "attention_backend": "flash", "optimizer": "adam", "optimizer_cpu_offload": false, "optimizer_offload_fraction": 1.0, "use_precision_aware_optimizer": false, "main_grads_dtype": "fp32", "main_params_dtype": "fp32", "exp_avg_dtype": "fp32", "exp_avg_sq_dtype": "fp32", "dataloader_type": "cyclic", "manual_gc": false, "manual_gc_interval": 0, "lr": 1e-05, "lr_decay_style": "cosine", "lr_decay_iters": null, "lr_warmup_iters": 0, "lr_warmup_fraction": 0.05, "min_lr": 1e-06, "weight_decay": 0.1, "clip_grad": 1.0, "adam_beta1": 0.9, "adam_beta2": 0.95, "adam_eps": 1e-08, "sgd_momentum": 0.9, "save": "/workspace/megatron_output/Iceblink-v3-SFT-1/v0-20251101-193922", "save_interval": 25, "save_retain_interval": null, "no_save_optim": true, "no_save_rng": true, "load": "/workspace/glm-4.5-air-mcore", "no_load_optim": false, "no_load_rng": false, "finetune": true, "ckpt_format": "torch_dist", "no_initialization": true, "auto_detect_ckpt_format": true, "exit_on_missing_checkpoint": true, "async_save": false, "use_persistent_ckpt_worker": false, "ckpt_fully_parallel_load": false, "ckpt_assume_constant_structure": false, "distributed_backend": "nccl", "local_rank": 0, "use_distributed_optimizer": true, "tensor_model_parallel_size": 8, "pipeline_model_parallel_size": 1, "decoder_first_pipeline_num_layers": null, "decoder_last_pipeline_num_layers": null, "sequence_parallel": true, "context_parallel_size": 1, "tp_comm_overlap": false, "overlap_grad_reduce": false, "overlap_param_gather": false, "distributed_timeout_minutes": 300000, "num_layers_per_virtual_pipeline_stage": null, "num_virtual_stages_per_pipeline_rank": null, "microbatch_group_size_per_virtual_pipeline_stage": null, "pipeline_model_parallel_layout": null, "num_layers": 46, "hidden_size": 4096, "ffn_hidden_size": 10944, "num_attention_heads": 96, "group_query_attention": true, "num_query_groups": 8, "max_position_embeddings": 131072, "position_embedding_type": "rope", "mrope_section": null, "rotary_base": 1000000, "rotary_percent": 1.0, "rotary_interleaved": false, "normalization": "RMSNorm", "norm_epsilon": 1e-05, "swiglu": true, "untie_embeddings_and_output_weights": true, "disable_bias_linear": true, "add_qkv_bias": true, "attention_dropout": 0.0, "hidden_dropout": 0.0, "kv_channels": 128, "qk_layernorm": false, "transformer_impl": "transformer_engine", "num_experts": 128, "moe_layer_freq": "[0]*1+[1]*45", "moe_ffn_hidden_size": 1408, "moe_shared_expert_intermediate_size": 1408, "moe_router_topk": 8, "moe_router_pre_softmax": false, "moe_router_dtype": "fp32", "moe_router_score_function": "sigmoid", "moe_router_bias_update_rate": 0.001, "moe_router_enable_expert_bias": true, "moe_router_topk_scaling_factor": 1.0, "moe_router_load_balancing_type": "aux_loss", "expert_model_parallel_size": 8, "expert_tensor_parallel_size": 1, "moe_token_dispatcher_type": 
null, "moe_enable_deepep": false, "moe_grouped_gemm": true, "moe_permute_fusion": true, "moe_aux_loss_coeff": 0.0005, "moe_z_loss_coeff": null, "moe_shared_expert_overlap": true, "moe_layer_recompute": false, "moe_expert_capacity_factor": null, "moe_pad_expert_input_to_capacity": false, "moe_token_drop_policy": null, "multi_latent_attention": false, "q_lora_rank": null, "kv_lora_rank": 32, "qk_head_dim": 128, "qk_pos_emb_head_dim": 64, "fp8_format": null, "fp8_recipe": "delayed", "fp8_amax_history_len": 1024, "fp8_amax_compute_algo": "max", "fp8_param_gather": false, "fp16": false, "bf16": true, "apply_query_key_layer_scaling": false, "attention_softmax_in_fp32": true, "log_params_norm": false, "log_throughput": false, "tensorboard_log_interval": 1, "tensorboard_queue_size": 50, "log_timers_to_tensorboard": true, "no_log_learning_rate_to_tensorboard": false, "log_validation_ppl_to_tensorboard": true, "log_memory_to_tensorboard": true, "logging_level": null, "wandb_project": "Megatron-Air-SFT", "wandb_exp_name": "Iceblink-v3-SFT-1", "wandb_save_dir": null, "eval_iters": -1, "eval_interval": 20, "seq_length": 10280, "num_workers": 8, "megatron_extra_kwargs": {}, "add_version": true, "rank": 0, "global_world_size": 8, "local_world_size": 8, "model_suffix": "GLM-4.5-Air", "model_info": "ModelInfo(model_type='glm4_5', model_dir='/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=True, config=None, task_type='causal_lm', num_labels=None)", "model_meta": "ModelMeta(model_type='glm4_5', model_groups=[ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.5-Air-Base', hf_model_id='zai-org/GLM-4.5-Air-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air', hf_model_id='zai-org/GLM-4.5-Air', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air-FP8', hf_model_id='zai-org/GLM-4.5-Air-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Base', hf_model_id='zai-org/GLM-4.5-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5', hf_model_id='zai-org/GLM-4.5', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-FP8', hf_model_id='zai-org/GLM-4.5-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.6', hf_model_id='zai-org/GLM-4.6', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='glm4_5', get_function=, model_arch=None, architectures=['Glm4MoeForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.54'], tags=[])", "model_dir": "/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air", "hub": "", "megatron_model_meta": "MegatronModelMeta(megatron_model_type='gpt', model_types=['qwen2', 'qwen2_5', 'qwq', 'qwq_preview', 'qwen2_5_math', 'llama', 'llama3', 'llama3_1', 'llama3_2', 'longwriter_llama3_1', 'codefuse_codellama', 'marco_o1', 'deepseek', 'deepseek_r1_distill', 'yi', 'yi_coder', 'sus', 'skywork_o1', 'openbuddy_llama', 'openbuddy_llama3', 'megrez', 'reflection', 'numina', 'ziya', 'mengzi3', 'qwen3', 'qwen3_thinking', 'qwen3_nothinking', 'qwen2_moe', 'qwen3_moe', 'qwen3_moe_thinking', 
'qwen3_coder', 'internlm3', 'mimo', 'mimo_rl', 'moonlight', 'deepseek_moe', 'deepseek_v2', 'deepseek_v2_5', 'deepseek_r1', 'dots1', 'ernie', 'glm4_5', 'deepseek_v3_1', 'ernie_thinking'], convert_mcore2hf=, convert_hf2mcore=, model_cls=, convert_hf_config=, get_transformer_layer_spec=None, model_provider=, visual_cls=None, extra_args_provider=None)", "extra_args": { "use_ray": false, "ray_exp_name": null, "device_groups": null, "model": "ZhipuAI/GLM-4.5-Air", "model_type": "glm4_5", "model_revision": null, "task_type": "causal_lm", "torch_dtype": "bfloat16", "attn_impl": null, "new_special_tokens": [], "num_labels": null, "problem_type": null, "rope_scaling": null, "device_map": null, "max_memory": {}, "max_model_len": null, "local_repo_path": null, "init_strategy": null, "template": "glm4_5", "system": null, "max_length": 10280, "truncation_strategy": "delete", "max_pixels": null, "agent_template": null, "norm_bbox": null, "use_chat_template": true, "padding_free": true, "padding_side": "right", "sequence_parallel_size": 1, "response_prefix": null, "template_backend": "swift", "dataset": [ "/workspace/joined_dataset_cleaned_modified.jsonl" ], "val_dataset": [], "split_dataset_ratio": 0.01, "data_seed": 42, "dataset_num_proc": 8, "load_from_cache_file": true, "dataset_shuffle": true, "val_dataset_shuffle": false, "streaming": false, "interleave_prob": null, "stopping_strategy": "first_exhausted", "shuffle_buffer_size": 1000, "download_mode": "reuse_dataset_if_exists", "columns": {}, "strict": false, "remove_unused_columns": true, "model_name": null, "model_author": null, "custom_dataset_info": [], "quant_method": null, "quant_bits": null, "hqq_axis": null, "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true, "bnb_4bit_quant_storage": null, "max_new_tokens": null, "temperature": null, "top_k": null, "top_p": null, "repetition_penalty": null, "num_beams": 1, "stream": false, "stop_words": [], "logprobs": false, "top_logprobs": null, "ckpt_dir": "/workspace/glm-4.5-air-mcore", "lora_modules": [], "tuner_backend": "peft", "train_type": "lora", "adapters": [], "external_plugins": [], "model_kwargs": {}, "load_args": false, "load_data_args": false, "packing": true, "packing_length": 10280, "lazy_tokenize": false, "cached_dataset": [], "custom_register_path": [], "use_hf": false, "hub_token": null, "ddp_timeout": 18000000, "ddp_backend": null, "ignore_args_error": false, "use_swift_lora": false, "freeze_llm": false, "freeze_vit": true, "freeze_aligner": true, "freeze_parameters": [], "freeze_parameters_regex": null, "freeze_parameters_ratio": 0.0, "trainable_parameters": [], "trainable_parameters_regex": null, "adapter_load": null, "target_modules": [ "all-linear" ], "target_regex": null, "modules_to_save": [], "lora_rank": 256, "lora_alpha": 16, "lora_dropout": 0.05, "lora_bias": "none", "lora_dtype": null, "use_rslora": true, "rlhf_type": null, "ref_load": null, "ref_adapter_load": null, "beta": 0.1, "rpo_alpha": null, "reference_free": false, "label_smoothing": 0.0, "f_divergence_type": "reverse_kl", "loss_type": null, "desirable_weight": 1.0, "undesirable_weight": 1.0, "calculate_KL": null, "center_rewards_coefficient": null, "padded_vocab_size": 151552, "initialize_embedding": false, "mlp_padding_free": false, "dataloader_persistent_workers": true, "dataloader_prefetch_factor": 10, "architectures": "Glm4MoeForCausalLM", "llm_architectures": null, "max_epochs": 2, "enable_dft_loss": false, "enable_channel_loss": false, 
"original_max_position_embeddings": null, "partial_rotary_factor": 0.5, "use_shared_expert_gate": false, "vit_gradient_checkpointing": true, "gradient_checkpointing_kwargs": null, "linear_num_value_heads": null, "linear_num_key_heads": null, "linear_key_head_dim": null, "linear_value_head_dim": null, "linear_conv_kernel_dim": null, "layer_types": null, "mrope_interleaved": false, "add_version": true, "model_info": "ModelInfo(model_type='glm4_5', model_dir='/root/.cache/modelscope/hub/models/ZhipuAI/GLM-4___5-Air', torch_dtype=torch.bfloat16, max_model_len=131072, quant_method=None, quant_bits=None, rope_scaling=None, is_moe_model=True, config=None, task_type='causal_lm', num_labels=None)", "model_meta": "ModelMeta(model_type='glm4_5', model_groups=[ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.5-Air-Base', hf_model_id='zai-org/GLM-4.5-Air-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air', hf_model_id='zai-org/GLM-4.5-Air', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Air-FP8', hf_model_id='zai-org/GLM-4.5-Air-FP8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-Base', hf_model_id='zai-org/GLM-4.5-Base', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5', hf_model_id='zai-org/GLM-4.5', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='ZhipuAI/GLM-4.5-FP8', hf_model_id='zai-org/GLM-4.5-FP8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='ZhipuAI/GLM-4.6', hf_model_id='zai-org/GLM-4.6', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='glm4_5', get_function=, model_arch=None, architectures=['Glm4MoeForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, is_reranker=False, task_type=None, ignore_patterns=None, requires=['transformers>=4.54'], tags=[])", "megatron_model_meta": "MegatronModelMeta(megatron_model_type='gpt', model_types=['qwen2', 'qwen2_5', 'qwq', 'qwq_preview', 'qwen2_5_math', 'llama', 'llama3', 'llama3_1', 'llama3_2', 'longwriter_llama3_1', 'codefuse_codellama', 'marco_o1', 'deepseek', 'deepseek_r1_distill', 'yi', 'yi_coder', 'sus', 'skywork_o1', 'openbuddy_llama', 'openbuddy_llama3', 'megrez', 'reflection', 'numina', 'ziya', 'mengzi3', 'qwen3', 'qwen3_thinking', 'qwen3_nothinking', 'qwen2_moe', 'qwen3_moe', 'qwen3_moe_thinking', 'qwen3_coder', 'internlm3', 'mimo', 'mimo_rl', 'moonlight', 'deepseek_moe', 'deepseek_v2', 'deepseek_v2_5', 'deepseek_r1', 'dots1', 'ernie', 'glm4_5', 'deepseek_v3_1', 'ernie_thinking'], convert_mcore2hf=, convert_hf2mcore=, model_cls=, convert_hf_config=, get_transformer_layer_spec=None, model_provider=, visual_cls=None, extra_args_provider=None)" } }