mkmkmkmk committed
Commit
966f022
1 Parent(s): cc4ef3c

add ds_config.json

Files changed (1)
  1. ds_config.json +53 -0
ds_config.json ADDED
@@ -0,0 +1,53 @@
+{
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "loss_scale_window": 1000,
+    "initial_scale_power": 16,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+
+  "bf16": {
+    "enabled": "auto"
+  },
+
+  "optimizer": {
+    "type": "AdamW",
+    "params": {
+      "lr": "auto",
+      "betas": "auto",
+      "eps": "auto",
+      "weight_decay": "auto"
+    }
+  },
+
+  "scheduler": {
+    "type": "WarmupDecayLR",
+    "params": {
+      "total_num_steps": "auto",
+      "warmup_min_lr": "auto",
+      "warmup_max_lr": "auto",
+      "warmup_num_steps": "auto"
+    }
+  },
+
+  "zero_optimization": {
+    "stage": 2,
+    "offload_optimizer": {
+      "device": "cpu",
+      "pin_memory": true
+    },
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "overlap_comm": true,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "contiguous_gradients": true
+  },
+
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto"
+}
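
Every "auto" value in this file follows the Hugging Face Transformers DeepSpeed integration convention: the Trainer fills them in from the matching TrainingArguments fields at launch, while the config itself fixes ZeRO stage 2 with optimizer-state offload to CPU. Below is a minimal sketch of how such a file is typically wired up; the model name, hyperparameter values, and the train_dataset placeholder are illustrative assumptions, not part of this commit.

# Minimal usage sketch (assumed, not from this repo): the Trainer resolves
# the "auto" fields in ds_config.json from these TrainingArguments.
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("gpt2")  # assumed checkpoint

args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=4,  # -> "train_micro_batch_size_per_gpu": "auto"
    gradient_accumulation_steps=8,  # -> "gradient_accumulation_steps": "auto"
    learning_rate=5e-5,             # -> optimizer "lr": "auto"
    warmup_steps=500,               # -> scheduler "warmup_num_steps": "auto"
    max_steps=10_000,               # -> scheduler "total_num_steps": "auto"
    fp16=True,                      # -> "fp16.enabled": "auto" (bf16 stays off)
    deepspeed="ds_config.json",     # hand this config to the DeepSpeed engine
)

# train_dataset is a placeholder for your own tokenized dataset.
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train()

Multi-GPU runs are typically started with the DeepSpeed launcher, e.g. `deepspeed train.py`, which lets the engine pick up the world size when computing the effective "train_batch_size".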