|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.7073170731707317,
  "eval_steps": 500,
  "global_step": 70,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.02, "learning_rate": 4.9995000000000005e-05, "loss": 1.5986, "step": 1},
    {"epoch": 0.05, "learning_rate": 4.999e-05, "loss": 2.5591, "step": 2},
    {"epoch": 0.07, "learning_rate": 4.9985e-05, "loss": 1.8778, "step": 3},
    {"epoch": 0.1, "learning_rate": 4.9980000000000006e-05, "loss": 2.6038, "step": 4},
    {"epoch": 0.12, "learning_rate": 4.9975e-05, "loss": 1.7417, "step": 5},
    {"epoch": 0.15, "learning_rate": 4.997e-05, "loss": 1.6722, "step": 6},
    {"epoch": 0.17, "learning_rate": 4.9965e-05, "loss": 1.4006, "step": 7},
    {"epoch": 0.2, "learning_rate": 4.996e-05, "loss": 1.2633, "step": 8},
    {"epoch": 0.22, "learning_rate": 4.9955e-05, "loss": 1.1828, "step": 9},
    {"epoch": 0.24, "learning_rate": 4.995e-05, "loss": 1.0802, "step": 10},
    {"epoch": 0.27, "learning_rate": 4.9945000000000004e-05, "loss": 0.9983, "step": 11},
    {"epoch": 0.29, "learning_rate": 4.9940000000000006e-05, "loss": 1.0225, "step": 12},
    {"epoch": 0.32, "learning_rate": 4.9935e-05, "loss": 0.996, "step": 13},
    {"epoch": 0.34, "learning_rate": 4.9930000000000005e-05, "loss": 0.9677, "step": 14},
    {"epoch": 0.37, "learning_rate": 4.992500000000001e-05, "loss": 0.8863, "step": 15},
    {"epoch": 0.39, "learning_rate": 4.992e-05, "loss": 0.925, "step": 16},
    {"epoch": 0.41, "learning_rate": 4.9915e-05, "loss": 0.8541, "step": 17},
    {"epoch": 0.44, "learning_rate": 4.991e-05, "loss": 0.8547, "step": 18},
    {"epoch": 0.46, "learning_rate": 4.9905000000000004e-05, "loss": 0.8159, "step": 19},
    {"epoch": 0.49, "learning_rate": 4.99e-05, "loss": 0.8186, "step": 20},
    {"epoch": 0.51, "learning_rate": 4.9895e-05, "loss": 0.7936, "step": 21},
    {"epoch": 0.54, "learning_rate": 4.9890000000000005e-05, "loss": 0.792, "step": 22},
    {"epoch": 0.56, "learning_rate": 4.9885e-05, "loss": 0.7438, "step": 23},
    {"epoch": 0.59, "learning_rate": 4.9880000000000004e-05, "loss": 0.7641, "step": 24},
    {"epoch": 0.61, "learning_rate": 4.9875000000000006e-05, "loss": 0.7644, "step": 25},
    {"epoch": 0.63, "learning_rate": 4.987e-05, "loss": 0.7188, "step": 26},
    {"epoch": 0.66, "learning_rate": 4.9865e-05, "loss": 0.7475, "step": 27},
    {"epoch": 0.68, "learning_rate": 4.986e-05, "loss": 0.729, "step": 28},
    {"epoch": 0.71, "learning_rate": 4.9855e-05, "loss": 0.719, "step": 29},
    {"epoch": 0.73, "learning_rate": 4.9850000000000006e-05, "loss": 0.69, "step": 30},
    {"epoch": 0.76, "learning_rate": 4.9845e-05, "loss": 0.6842, "step": 31},
    {"epoch": 0.78, "learning_rate": 4.9840000000000004e-05, "loss": 0.7492, "step": 32},
    {"epoch": 0.8, "learning_rate": 4.9835000000000007e-05, "loss": 0.697, "step": 33},
    {"epoch": 0.83, "learning_rate": 4.983e-05, "loss": 0.6985, "step": 34},
    {"epoch": 0.85, "learning_rate": 4.9825000000000005e-05, "loss": 0.6432, "step": 35},
    {"epoch": 0.88, "learning_rate": 4.982e-05, "loss": 0.6493, "step": 36},
    {"epoch": 0.9, "learning_rate": 4.9815e-05, "loss": 0.6856, "step": 37},
    {"epoch": 0.93, "learning_rate": 4.981e-05, "loss": 0.6806, "step": 38},
    {"epoch": 0.95, "learning_rate": 4.9805e-05, "loss": 0.6369, "step": 39},
    {"epoch": 0.98, "learning_rate": 4.9800000000000004e-05, "loss": 0.612, "step": 40},
    {"epoch": 1.0, "learning_rate": 4.9795e-05, "loss": 0.4179, "step": 41},
    {"epoch": 1.02, "learning_rate": 4.979e-05, "loss": 0.3876, "step": 42},
    {"epoch": 1.05, "learning_rate": 4.9785000000000005e-05, "loss": 0.3741, "step": 43},
    {"epoch": 1.07, "learning_rate": 4.978e-05, "loss": 0.3549, "step": 44},
    {"epoch": 1.1, "learning_rate": 4.9775000000000004e-05, "loss": 0.3766, "step": 45},
    {"epoch": 1.12, "learning_rate": 4.977e-05, "loss": 0.3832, "step": 46},
    {"epoch": 1.15, "learning_rate": 4.9765e-05, "loss": 0.3699, "step": 47},
    {"epoch": 1.17, "learning_rate": 4.976e-05, "loss": 0.3682, "step": 48},
    {"epoch": 1.2, "learning_rate": 4.9755e-05, "loss": 0.4074, "step": 49},
    {"epoch": 1.22, "learning_rate": 4.975e-05, "loss": 0.3591, "step": 50},
    {"epoch": 1.24, "learning_rate": 4.9745000000000006e-05, "loss": 0.3841, "step": 51},
    {"epoch": 1.27, "learning_rate": 4.974e-05, "loss": 0.376, "step": 52},
    {"epoch": 1.29, "learning_rate": 4.9735000000000004e-05, "loss": 0.4079, "step": 53},
    {"epoch": 1.32, "learning_rate": 4.973000000000001e-05, "loss": 0.3882, "step": 54},
    {"epoch": 1.34, "learning_rate": 4.9725e-05, "loss": 0.3948, "step": 55},
    {"epoch": 1.37, "learning_rate": 4.972e-05, "loss": 0.4103, "step": 56},
    {"epoch": 1.39, "learning_rate": 4.9715e-05, "loss": 0.4097, "step": 57},
    {"epoch": 1.41, "learning_rate": 4.9710000000000003e-05, "loss": 0.3772, "step": 58},
    {"epoch": 1.44, "learning_rate": 4.9705e-05, "loss": 0.402, "step": 59},
    {"epoch": 1.46, "learning_rate": 4.97e-05, "loss": 0.386, "step": 60},
    {"epoch": 1.49, "learning_rate": 4.9695000000000004e-05, "loss": 0.3865, "step": 61},
    {"epoch": 1.51, "learning_rate": 4.969e-05, "loss": 0.4051, "step": 62},
    {"epoch": 1.54, "learning_rate": 4.9685e-05, "loss": 0.4221, "step": 63},
    {"epoch": 1.56, "learning_rate": 4.9680000000000005e-05, "loss": 0.4148, "step": 64},
    {"epoch": 1.59, "learning_rate": 4.967500000000001e-05, "loss": 0.3827, "step": 65},
    {"epoch": 1.61, "learning_rate": 4.967e-05, "loss": 0.4125, "step": 66},
    {"epoch": 1.63, "learning_rate": 4.9665e-05, "loss": 0.3854, "step": 67},
    {"epoch": 1.66, "learning_rate": 4.966e-05, "loss": 0.3953, "step": 68},
    {"epoch": 1.68, "learning_rate": 4.9655000000000005e-05, "loss": 0.3818, "step": 69},
    {"epoch": 1.71, "learning_rate": 4.965e-05, "loss": 0.3764, "step": 70}
  ],
  "logging_steps": 1,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 244,
  "save_steps": 10,
  "total_flos": 1.1687420870583696e+16,
  "train_batch_size": 3584,
  "trial_name": null,
  "trial_params": null
}
|
|