Updated compression_config to quantization_config
config.json CHANGED (+9 -9)
@@ -6,14 +6,6 @@
   "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 1,
-  "compression_config": {
-    "sparsity_config": {
-      "format": "sparse-bitmask",
-      "global_sparsity": 44.05922139875622,
-      "registry_requires_subclass": false,
-      "sparsity_structure": "0:0"
-    }
-  },
   "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 2048,
@@ -32,5 +24,13 @@
   "torch_dtype": "float16",
   "transformers_version": "4.40.0",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 32000,
+  "quantization_config": {
+    "sparsity_config": {
+      "format": "sparse-bitmask",
+      "global_sparsity": 44.05922139875622,
+      "registry_requires_subclass": false,
+      "sparsity_structure": "0:0"
+    }
+  },
 }
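As a minimal sketch of how the renamed key surfaces downstream (the checkpoint path below is a placeholder, not part of this commit): transformers exposes top-level entries of config.json as attributes of the loaded config object, so after this change the sparsity settings are found under quantization_config rather than compression_config.

from transformers import AutoConfig

# Load the updated config.json; "path/to/checkpoint" is a placeholder.
config = AutoConfig.from_pretrained("path/to/checkpoint")

# The sparsity settings now live under the "quantization_config" key;
# the old "compression_config" attribute is no longer present.
print(getattr(config, "quantization_config", None))
print(getattr(config, "compression_config", None))  # expected: None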