# GPT-2 pretraining setup
{
  # See README for MoE config docs!
  "moe_type": "megablocks",
  "moe_token_dropping": false,
  # Have 4 experts per layer (every 2 layers by default)
  "moe_num_experts": 4,

  # parallelism settings
  "enable_expert_tensor_parallelism": true,
  "pipe_parallel_size": 1, # not yet supported for MoE
  "model_parallel_size": 1,
  "moe_expert_parallel_size": 1,

  # model settings
  "num_layers": 12,
  "hidden_size": 768,
  "num_attention_heads": 12,
  "seq_length": 2048,
  "max_position_embeddings": 2048,
  "norm": "layernorm",
  "pos_emb": "rotary",
  "no_weight_tying": true,
  "gpt_j_residual": false,
  "output_layer_parallelism": "column",

  # these should provide some speedup but take a while to build, set to true if desired
  "scaled_upper_triang_masked_softmax_fusion": false,
  "bias_gelu_fusion": false,
  "rope_fusion": false,

  # init methods
  "init_method": "small_init",
  "output_layer_init_method": "wang_init",

  # optimizer settings
  "optimizer": {
    "type": "Adam",
    "params": {
      "lr": 0.0006,
      "betas": [0.9, 0.95],
      "eps": 1.0e-8,
    }
  },
  "min_lr": 0.00006,

  # for all zero_optimization options, see https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
  "zero_optimization": {
    "stage": 0,
    "allgather_partitions": True,
    "allgather_bucket_size": 500000000,
    "overlap_comm": True,
    "reduce_scatter": True,
    "reduce_bucket_size": 500000000,
    "contiguous_gradients": True,
  },

  # batch / data settings
  "train_micro_batch_size_per_gpu": 4,
  "data_impl": "mmap",

  # activation checkpointing
  "checkpoint_activations": true,
  "checkpoint_num_layers": 1,
  "partition_activations": true,
  "synchronize_each_layer": true,

  # regularization
  "gradient_clipping": 1.0,
  "weight_decay": 0.1,
  "hidden_dropout": 0.0,
  "attention_dropout": 0.0,

  # precision settings
  "fp16": {
    "enabled": true,
    "loss_scale": 0,
    "loss_scale_window": 1000,
    "hysteresis": 2,
    "min_loss_scale": 1
  },

  # misc. training settings
  "train_iters": 320000,
  "lr_decay_iters": 320000,
  "distributed_backend": "nccl",
  "lr_decay_style": "cosine",
  "warmup": 0.01,
  "checkpoint_factor": 10000,
  "eval_interval": 1000,
  "eval_iters": 10,

  # logging
  "log_interval": 10,
  "steps_per_print": 10,
  "keep_last_n_checkpoints": 4,
  "wall_clock_breakdown": true,

  # networking
  "hostfile": "/mock_path"
}
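
# Minimal launch sketch, assuming this is a GPT-NeoX-style config (the MoE and
# fusion keys above match that format); the config path below is a placeholder,
# not a file referenced by the original:
#
#   python ./deepy.py train.py /path/to/this_config.yml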