Update README.md #13
opened by haoyang-amd

README.md CHANGED
````diff
@@ -23,8 +23,9 @@ python3 quantize_quark.py \
     --output_dir Meta-Llama-3.1-8B-Instruct-FP8-KV \
     --quant_scheme w_fp8_a_fp8 \
     --kv_cache_dtype fp8 \
-    --num_calib_data 128
-    --model_export quark_safetensors
+    --num_calib_data 128 \
+    --model_export quark_safetensors \
+    --no_weight_matrix_merge
 
 # If model size is too large for single GPU, please use multi GPU instead.
 python3 quantize_quark.py \
@@ -32,8 +33,9 @@ python3 quantize_quark.py \
     --output_dir Meta-Llama-3.1-8B-Instruct-FP8-KV \
     --quant_scheme w_fp8_a_fp8 \
     --kv_cache_dtype fp8 \
-    --num_calib_data 128
+    --num_calib_data 128 \
     --model_export quark_safetensors \
+    --no_weight_matrix_merge \
     --multi_gpu
 ```
 ## Deployment
````
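For review convenience, here are both invocations as they read after this change, reconstructed from the new side of the two hunks above. Arguments preceding `--output_dir` fall outside the hunks and are omitted.

```bash
# Single-GPU quantization (arguments before --output_dir are unchanged and not shown in the hunks).
python3 quantize_quark.py \
    --output_dir Meta-Llama-3.1-8B-Instruct-FP8-KV \
    --quant_scheme w_fp8_a_fp8 \
    --kv_cache_dtype fp8 \
    --num_calib_data 128 \
    --model_export quark_safetensors \
    --no_weight_matrix_merge

# If model size is too large for single GPU, please use multi GPU instead.
python3 quantize_quark.py \
    --output_dir Meta-Llama-3.1-8B-Instruct-FP8-KV \
    --quant_scheme w_fp8_a_fp8 \
    --kv_cache_dtype fp8 \
    --num_calib_data 128 \
    --model_export quark_safetensors \
    --no_weight_matrix_merge \
    --multi_gpu
```

Note that the trailing `\` added to `--num_calib_data 128` is a substantive fix, not just style: without the continuation, the shell ended the command at that line, so the flags after it were never passed to quantize_quark.py and instead hit the shell as a stray command.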