|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 34,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029411764705882353,
      "grad_norm": 0.734375,
      "learning_rate": 2e-05,
      "loss": 2.0227,
      "step": 1
    },
    {
      "epoch": 0.058823529411764705,
      "grad_norm": 0.59765625,
      "learning_rate": 4e-05,
      "loss": 1.6615,
      "step": 2
    },
    {
      "epoch": 0.08823529411764706,
      "grad_norm": 0.6640625,
      "learning_rate": 6e-05,
      "loss": 1.9138,
      "step": 3
    },
    {
      "epoch": 0.11764705882352941,
      "grad_norm": 0.9375,
      "learning_rate": 8e-05,
      "loss": 2.2948,
      "step": 4
    },
    {
      "epoch": 0.14705882352941177,
      "grad_norm": 0.9453125,
      "learning_rate": 0.0001,
      "loss": 2.0388,
      "step": 5
    },
    {
      "epoch": 0.17647058823529413,
      "grad_norm": 0.7734375,
      "learning_rate": 0.00012,
      "loss": 1.9193,
      "step": 6
    },
    {
      "epoch": 0.20588235294117646,
      "grad_norm": 0.8203125,
      "learning_rate": 0.00014,
      "loss": 1.7604,
      "step": 7
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 0.734375,
      "learning_rate": 0.00016,
      "loss": 1.6406,
      "step": 8
    },
    {
      "epoch": 0.2647058823529412,
      "grad_norm": 0.83984375,
      "learning_rate": 0.00018,
      "loss": 1.528,
      "step": 9
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.71875,
      "learning_rate": 0.0002,
      "loss": 1.4681,
      "step": 10
    },
    {
      "epoch": 0.3235294117647059,
      "grad_norm": 1.015625,
      "learning_rate": 0.00019914448613738106,
      "loss": 1.4515,
      "step": 11
    },
    {
      "epoch": 0.35294117647058826,
      "grad_norm": 0.8984375,
      "learning_rate": 0.00019659258262890683,
      "loss": 1.4636,
      "step": 12
    },
    {
      "epoch": 0.38235294117647056,
      "grad_norm": 0.9140625,
      "learning_rate": 0.0001923879532511287,
      "loss": 1.1527,
      "step": 13
    },
    {
      "epoch": 0.4117647058823529,
      "grad_norm": 0.86328125,
      "learning_rate": 0.00018660254037844388,
      "loss": 1.1761,
      "step": 14
    },
    {
      "epoch": 0.4411764705882353,
      "grad_norm": 1.078125,
      "learning_rate": 0.00017933533402912354,
      "loss": 1.3331,
      "step": 15
    },
    {
      "epoch": 0.47058823529411764,
      "grad_norm": 0.98828125,
      "learning_rate": 0.00017071067811865476,
      "loss": 1.3777,
      "step": 16
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.90234375,
      "learning_rate": 0.00016087614290087208,
      "loss": 1.1361,
      "step": 17
    },
    {
      "epoch": 0.5294117647058824,
      "grad_norm": 1.0234375,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.0423,
      "step": 18
    },
    {
      "epoch": 0.5588235294117647,
      "grad_norm": 1.125,
      "learning_rate": 0.000138268343236509,
      "loss": 1.0441,
      "step": 19
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.91796875,
      "learning_rate": 0.00012588190451025207,
      "loss": 0.9957,
      "step": 20
    },
    {
      "epoch": 0.6176470588235294,
      "grad_norm": 1.1328125,
      "learning_rate": 0.00011305261922200519,
      "loss": 1.0449,
      "step": 21
    },
    {
      "epoch": 0.6470588235294118,
      "grad_norm": 1.109375,
      "learning_rate": 0.0001,
      "loss": 1.0147,
      "step": 22
    },
    {
      "epoch": 0.6764705882352942,
      "grad_norm": 1.140625,
      "learning_rate": 8.694738077799488e-05,
      "loss": 0.9797,
      "step": 23
    },
    {
      "epoch": 0.7058823529411765,
      "grad_norm": 1.09375,
      "learning_rate": 7.411809548974792e-05,
      "loss": 0.8892,
      "step": 24
    },
    {
      "epoch": 0.7352941176470589,
      "grad_norm": 1.171875,
      "learning_rate": 6.173165676349103e-05,
      "loss": 0.7879,
      "step": 25
    },
    {
      "epoch": 0.7647058823529411,
      "grad_norm": 1.0546875,
      "learning_rate": 5.000000000000002e-05,
      "loss": 0.8614,
      "step": 26
    },
    {
      "epoch": 0.7941176470588235,
      "grad_norm": 1.328125,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 1.0043,
      "step": 27
    },
    {
      "epoch": 0.8235294117647058,
      "grad_norm": 1.0234375,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.9867,
      "step": 28
    },
    {
      "epoch": 0.8529411764705882,
      "grad_norm": 0.96484375,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 0.892,
      "step": 29
    },
    {
      "epoch": 0.8823529411764706,
      "grad_norm": 1.0078125,
      "learning_rate": 1.339745962155613e-05,
      "loss": 0.8257,
      "step": 30
    },
    {
      "epoch": 0.9117647058823529,
      "grad_norm": 0.8359375,
      "learning_rate": 7.612046748871327e-06,
      "loss": 0.8757,
      "step": 31
    },
    {
      "epoch": 0.9411764705882353,
      "grad_norm": 1.359375,
      "learning_rate": 3.40741737109318e-06,
      "loss": 1.098,
      "step": 32
    },
    {
      "epoch": 0.9705882352941176,
      "grad_norm": 1.2265625,
      "learning_rate": 8.555138626189618e-07,
      "loss": 1.0088,
      "step": 33
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.4921875,
      "learning_rate": 0.0,
      "loss": 0.9341,
      "step": 34
    }
  ],
  "logging_steps": 1,
  "max_steps": 34,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 12,
  "total_flos": 9.946320738818458e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|