quant_stage:
  quant_modifiers:
    # SmoothQuant migrates quantization difficulty from activations to weights.
    # Each mapping pairs the layers to balance with the layer whose output feeds them.
    SmoothQuantModifier:
      smoothing_strength: 0.8
      mappings:
      - - ['re:.*q_proj', 're:.*k_proj', 're:.*v_proj']
        - re:.*input_layernorm
      - - ['re:.*gate_proj', 're:.*up_proj']
        - re:.*post_attention_layernorm
      - - ['re:.*down_proj']
        - re:.*up_proj
    # GPTQ then quantizes to W8A8: int8 symmetric per-channel weights (MSE observer)
    # and int8 dynamic per-token input activations; lm_head stays in full precision.
    GPTQModifier:
      sequential_update: true   # quantize layer by layer, propagating quantized outputs
      dampening_frac: 0.01      # fraction of mean Hessian diagonal added for numerical stability
      ignore: [lm_head]
      config_groups:
        group_0:
          targets: [Linear]
          weights: {num_bits: 8, type: int, symmetric: true, strategy: channel, observer: mse}
          input_activations: {num_bits: 8, type: int, symmetric: true, strategy: token, dynamic: true, observer: memoryless}
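
# Usage: a minimal sketch of applying this recipe via llmcompressor's `oneshot`
# entry point. The model name, calibration dataset, and output path below are
# placeholders, not values prescribed by this recipe:
#
#   from llmcompressor.transformers import oneshot
#
#   oneshot(
#       model="meta-llama/Llama-2-7b-hf",   # placeholder checkpoint
#       dataset="open_platypus",            # placeholder calibration set
#       recipe="recipe.yaml",               # this file
#       max_seq_length=2048,
#       num_calibration_samples=512,
#       output_dir="llama-2-7b-w8a8",
#   )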