{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3287019130641754,
  "global_step": 11048,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 1.9900826118433453e-05,
      "loss": 2.5088,
      "step": 500
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.9801652236866898e-05,
      "loss": 2.4955,
      "step": 1000
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.970247835530035e-05,
      "loss": 2.5035,
      "step": 1500
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.96033044737338e-05,
      "loss": 2.4969,
      "step": 2000
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.9504130592167246e-05,
      "loss": 2.4963,
      "step": 2500
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9404956710600698e-05,
      "loss": 2.5023,
      "step": 3000
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.930578282903415e-05,
      "loss": 2.5007,
      "step": 3500
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9206608947467594e-05,
      "loss": 2.4953,
      "step": 4000
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9107435065901046e-05,
      "loss": 2.499,
      "step": 4500
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.9008261184334497e-05,
      "loss": 2.4919,
      "step": 5000
    },
    {
      "epoch": 0.16,
      "learning_rate": 1.8909087302767945e-05,
      "loss": 2.4988,
      "step": 5500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.8809913421201393e-05,
      "loss": 2.5076,
      "step": 6000
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.8710739539634845e-05,
      "loss": 2.4915,
      "step": 6500
    },
    {
      "epoch": 0.21,
      "learning_rate": 1.8611565658068293e-05,
      "loss": 2.4823,
      "step": 7000
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.851239177650174e-05,
      "loss": 2.4859,
      "step": 7500
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.8413217894935193e-05,
      "loss": 2.4951,
      "step": 8000
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.831404401336864e-05,
      "loss": 2.4816,
      "step": 8500
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.821487013180209e-05,
      "loss": 2.485,
      "step": 9000
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.811569625023554e-05,
      "loss": 2.4889,
      "step": 9500
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.801652236866899e-05,
      "loss": 2.4812,
      "step": 10000
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.7917348487102437e-05,
      "loss": 2.4774,
      "step": 10500
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.781817460553589e-05,
      "loss": 2.4796,
      "step": 11000
    }
  ],
  "max_steps": 100833,
  "num_train_epochs": 3,
  "total_flos": 2.972249387945165e+16,
  "trial_name": null,
  "trial_params": null
}