# molmo-72b-awq / recipe.yaml
# AWQ 4-bit quantized Molmo-72B (~37.8 GB, 73.9% size reduction)
# Uploaded by ronantakizawa — commit 3e87388 (verified), 608 bytes
# llm-compressor recipe: AWQ (Activation-aware Weight Quantization) stage.
# Quantizes all Linear layers to 4-bit weights / 16-bit activations (W4A16),
# while keeping precision-sensitive modules in full precision.
default_stage:
  default_modifiers:
    AWQModifier:
      targets: [Linear]
      # Skip the output head, the vision tower, the vision-language connector,
      # and embeddings — these are excluded from quantization entirely.
      ignore: ['re:.*lm_head', 're:.*vision.*', 're:.*connector.*', 're:.*embed.*']
      scheme: W4A16
      # Each mapping pairs a smoothing source layer with the downstream layers
      # whose input scales are balanced against it during AWQ calibration.
      mappings:
        # Attention input norm smooths into the Q/K/V projections.
        - smooth_layer: 're:.*input_layernorm$'
          balance_layers: ['re:.*q_proj$', 're:.*k_proj$', 're:.*v_proj$']
        # V projection smooths into the attention output projection.
        - smooth_layer: 're:.*v_proj$'
          balance_layers: ['re:.*o_proj$']
        # Post-attention norm smooths into the MLP gate/up projections.
        - smooth_layer: 're:.*post_attention_layernorm$'
          balance_layers: ['re:.*gate_proj$', 're:.*up_proj$']
        # Up projection smooths into the MLP down projection.
        - smooth_layer: 're:.*up_proj$'
          balance_layers: ['re:.*down_proj$']
      # Scale search considers both activation and weight magnitudes.
      duo_scaling: true