---
# Quantization recipe: SmoothQuant followed by GPTQ (W8A8 int, per-channel
# weights, dynamic per-token activations). Reconstructed into block-style YAML
# from a single-line paste; values are unchanged.
quant_stage:
  quant_modifiers:
    # SmoothQuant migrates activation outliers into the weights before
    # quantization. Each mapping is a pair:
    #   [<regexes of layers to balance>, <regex of the preceding norm/layer>]
    SmoothQuantModifier:
      smoothing_strength: 0.8
      mappings:
        # Attention projections, balanced against the input layernorm.
        - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
          - 're:.*input_layernorm'
        # MLP up/gate projections, balanced against the post-attention norm.
        - - ['re:.*gate_proj', 're:.*up_proj']
          - 're:.*post_attention_layernorm'
        # MLP down projection, balanced against up_proj's output.
        - - ['re:.*down_proj']
          - 're:.*up_proj'
    # GPTQ weight quantization applied to all Linear layers except lm_head.
    GPTQModifier:
      sequential_update: false
      dampening_frac: 0.1
      ignore: [lm_head]
      config_groups:
        group_0:
          targets: [Linear]
          # int8 weights, symmetric, one scale per output channel.
          weights: {num_bits: 8, type: int, symmetric: true, strategy: channel}
          # int8 activations, dynamic per-token scales computed at runtime.
          input_activations: {num_bits: 8, type: int, symmetric: true, dynamic: true, strategy: token}