---
# AWQ (Activation-aware Weight Quantization) recipe for llm-compressor.
# NOTE(review): the source file was collapsed onto a single line, which is
# invalid YAML (a plain scalar cannot contain ": "); restored to block style.
# All keys/values are preserved exactly; only formatting and quoting changed.
default_stage:
  default_modifiers:
    AWQModifier:
      # Quantization scheme: int4 weights, symmetric, per-group with
      # group_size 32 (W4A16 — activations are left unquantized below).
      config_groups:
        group_0:
          targets: [Linear]
          weights:
            num_bits: 4
            type: int
            symmetric: true
            group_size: 32
            strategy: group
            block_structure: null
            dynamic: false
            actorder: null
            observer: mse        # MSE-minimizing observer for scale/zero-point
            observer_kwargs: {}
          input_activations: null   # null → activations not quantized
          output_activations: null
          format: null
      targets: [Linear]
      # Modules excluded from quantization. The 're:' prefix marks a regex
      # pattern (llm-compressor convention); quoted so YAML specials such as
      # '*' and '$' stay literal text.
      ignore:
        - lm_head
        - 're:.*embed_tokens'
        - 're:.*input_layernorm'
        - 're:.*post_attention_layernorm'
        - model.language_model.norm
        - 're:.*shared_experts.*'
        - 're:model.language_model.layers.0.*'
        - 're:.*mlp.gate'
        - 're:model.visual.*'
      # AWQ smoothing mappings: activation scale is migrated from each
      # smooth_layer onto its balance_layers (standard attention/MLP pairs).
      mappings:
        - smooth_layer: 're:.*input_layernorm$'
          balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
        - smooth_layer: 're:.*v_proj$'
          balance_layers: ['re:.*o_proj$']
        - smooth_layer: 're:.*post_attention_layernorm$'
          balance_layers: ['re:.*gate_proj$', 're:.*up_proj$']
        - smooth_layer: 're:.*up_proj$'
          balance_layers: ['re:.*down_proj$']
      # NOTE(review): python-specific tag below requires a non-safe loader
      # (PyYAML FullLoader/UnsafeLoader) and will break yaml.safe_load.
      # Kept byte-identical because llm-compressor serializes torch.device
      # this way — confirm with the consumer before simplifying to a plain
      # 'cpu' string.
      offload_device: !!python/object/apply:torch.device [cpu]
      duo_scaling: true