{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 98.25260162353516,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 3.2276,
      "step": 10
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 77.15660858154297,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 2.4812,
      "step": 20
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 44.03319549560547,
      "learning_rate": 3e-06,
      "loss": 2.4834,
      "step": 30
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 36.34611892700195,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.8211,
      "step": 40
    },
    {
      "epoch": 1.0869565217391304,
      "grad_norm": 26.440757751464844,
      "learning_rate": 5e-06,
      "loss": 1.8561,
      "step": 50
    },
    {
      "epoch": 1.3043478260869565,
      "grad_norm": 31.280738830566406,
      "learning_rate": 6e-06,
      "loss": 1.3881,
      "step": 60
    },
    {
      "epoch": 1.5217391304347827,
      "grad_norm": 18.75351333618164,
      "learning_rate": 7e-06,
      "loss": 1.1658,
      "step": 70
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 31.8607177734375,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.4099,
      "step": 80
    },
    {
      "epoch": 1.9565217391304348,
      "grad_norm": 40.35784912109375,
      "learning_rate": 9e-06,
      "loss": 1.2576,
      "step": 90
    },
    {
      "epoch": 2.1739130434782608,
      "grad_norm": 14.189046859741211,
      "learning_rate": 1e-05,
      "loss": 0.9798,
      "step": 100
    },
    {
      "epoch": 2.391304347826087,
      "grad_norm": 12.539738655090332,
      "learning_rate": 8.80952380952381e-06,
      "loss": 1.0279,
      "step": 110
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 11.521944046020508,
      "learning_rate": 7.61904761904762e-06,
      "loss": 0.7695,
      "step": 120
    },
    {
      "epoch": 2.8260869565217392,
      "grad_norm": 16.423460006713867,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.8634,
      "step": 130
    },
    {
      "epoch": 3.0434782608695654,
      "grad_norm": 14.004690170288086,
      "learning_rate": 5.2380952380952384e-06,
      "loss": 0.8086,
      "step": 140
    },
    {
      "epoch": 3.260869565217391,
      "grad_norm": 19.613393783569336,
      "learning_rate": 4.047619047619048e-06,
      "loss": 0.6529,
      "step": 150
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 10.613371849060059,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.6305,
      "step": 160
    },
    {
      "epoch": 3.6956521739130435,
      "grad_norm": 19.775999069213867,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.5669,
      "step": 170
    },
    {
      "epoch": 3.9130434782608696,
      "grad_norm": 22.53849220275879,
      "learning_rate": 4.7619047619047623e-07,
      "loss": 0.6313,
      "step": 180
    }
  ],
  "logging_steps": 10,
  "max_steps": 184,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 285336058355712.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}