{
  # finetuning option
  "finetune": true,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0002,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00002,
  "override_lr_scheduler": true,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 1,
    "allgather_partitions": true,
    "allgather_bucket_size": 500000000,
    "overlap_comm": true,
    "reduce_scatter": true,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": true,
  },

  # batch / data settings
  "train_micro_batch_size_per_gpu": 4,
  "data_impl": "mmap",

  # activation checkpointing
  "checkpoint_activations": true,
  "checkpoint_num_layers": 1,
  "partition_activations": true,
  "synchronize_each_layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight_decay": 0.1,
  "hidden_dropout": 0,
  "attention_dropout": 0,

  # precision settings
  "fp16": {
    "fp16": true,
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "hysteresis": 2,
    "min_loss_scale": 1
  },

  # misc. training settings
  "train_iters": 320000,
  "lr_decay_iters": 320000,
  "distributed_backend": "nccl",
  "lr_decay_style": "cosine",
  "warmup": 0.01,
  "checkpoint_factor": 10000,
  "eval_interval": 1000,
  "eval_iters": 10,

  # logging
  "log_interval": 100,
  "steps_per_print": 10,
  "keep_last_n_checkpoints": 4,
  "wall_clock_breakdown": true,

  "mlp_multiple_of": 256,
}
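
For reference, here is a minimal sketch of how the scheduler settings above typically combine, assuming the usual linear-warmup plus cosine-decay schedule: the rate ramps up over warmup * train_iters steps, then decays from lr to min_lr over lr_decay_iters. The constants are taken from this config; the function itself is illustrative, not the trainer's actual implementation.

import math

# Values taken from the config above.
LR = 2.0e-4            # "lr"
MIN_LR = 2.0e-5        # "min_lr"
DECAY_ITERS = 320_000  # "lr_decay_iters"
WARMUP_FRACTION = 0.01 # "warmup"

def lr_at(step: int) -> float:
    """Learning rate at a given step under linear warmup + cosine decay (illustrative)."""
    warmup_steps = int(WARMUP_FRACTION * DECAY_ITERS)  # 3,200 steps here
    if step < warmup_steps:
        # Linear ramp from 0 up to the peak learning rate.
        return LR * step / max(1, warmup_steps)
    # Cosine decay from LR down to MIN_LR over the remaining steps.
    progress = (step - warmup_steps) / max(1, DECAY_ITERS - warmup_steps)
    return MIN_LR + 0.5 * (LR - MIN_LR) * (1.0 + math.cos(math.pi * progress))

if __name__ == "__main__":
    for step in (0, 3_200, 160_000, 320_000):
        print(f"step {step:>7}: lr = {lr_at(step):.2e}")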