# W8A8 quantization recipe: symmetric INT8 per-tensor weights with dynamic
# per-token INT8 input activations, applied to all Linear layers.
test_stage:
  quant_modifiers:
    vLLMQuantizationModifier:
      # Modules left in their original precision.
      ignore: [lm_head, model.layers.0.mlp.down_proj]
      config_groups:
        group_0:
          # Symmetric INT8 weights, one scale per tensor.
          weights: {num_bits: 8, type: int, symmetric: true, strategy: tensor}
          # Symmetric INT8 input activations, scales computed per token at runtime.
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: token, dynamic: true}
          # Output activations are not quantized.
          output_activations: null
          # Apply this group to every Linear module not listed under `ignore`.
          targets: [Linear]
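
# Note (assumption, not confirmed by this file): a recipe in this format is
# typically passed to a one-shot compression entry point, e.g. llm-compressor's
# `oneshot(model=..., recipe="recipe.yaml")`; the exact entry point and modifier
# name accepted depend on the library and version consuming this recipe.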