{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 99.9976976208749,
  "global_step": 32500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 1.6250000000000002e-05,
      "loss": 1.0806,
      "step": 325
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7577828168869019,
      "eval_runtime": 1.6267,
      "eval_samples_per_second": 307.365,
      "eval_steps_per_second": 38.728,
      "step": 325
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.2500000000000004e-05,
      "loss": 0.8255,
      "step": 650
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.556172788143158,
      "eval_runtime": 1.6277,
      "eval_samples_per_second": 307.186,
      "eval_steps_per_second": 38.705,
      "step": 650
    },
    {
      "epoch": 3.0,
      "learning_rate": 4.875e-05,
      "loss": 0.6001,
      "step": 975
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.421012818813324,
      "eval_runtime": 1.6279,
      "eval_samples_per_second": 307.149,
      "eval_steps_per_second": 38.701,
      "step": 975
    },
    {
      "epoch": 4.0,
      "learning_rate": 4.9523809523809525e-05,
      "loss": 0.4523,
      "step": 1300
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.3349049389362335,
      "eval_runtime": 1.6071,
      "eval_samples_per_second": 311.117,
      "eval_steps_per_second": 39.201,
      "step": 1300
    },
    {
      "epoch": 5.0,
      "learning_rate": 4.900793650793651e-05,
      "loss": 0.3601,
      "step": 1625
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.29192015528678894,
      "eval_runtime": 1.6111,
      "eval_samples_per_second": 310.348,
      "eval_steps_per_second": 39.104,
      "step": 1625
    },
    {
      "epoch": 6.0,
      "learning_rate": 4.84920634920635e-05,
      "loss": 0.3035,
      "step": 1950
    },
    {
      "epoch": 6.0,
      "eval_loss": 0.25816571712493896,
      "eval_runtime": 1.6209,
      "eval_samples_per_second": 308.476,
      "eval_steps_per_second": 38.868,
      "step": 1950
    },
    {
      "epoch": 7.0,
      "learning_rate": 4.797619047619048e-05,
      "loss": 0.2648,
      "step": 2275
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.239731565117836,
      "eval_runtime": 1.6133,
      "eval_samples_per_second": 309.927,
      "eval_steps_per_second": 39.051,
      "step": 2275
    },
    {
      "epoch": 8.0,
      "learning_rate": 4.746031746031746e-05,
      "loss": 0.2367,
      "step": 2600
    },
    {
      "epoch": 8.0,
      "eval_loss": 0.22367145121097565,
      "eval_runtime": 1.6208,
      "eval_samples_per_second": 308.483,
      "eval_steps_per_second": 38.869,
      "step": 2600
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.6944444444444446e-05,
      "loss": 0.216,
      "step": 2925
    },
    {
      "epoch": 9.0,
      "eval_loss": 0.21722149848937988,
      "eval_runtime": 1.6243,
      "eval_samples_per_second": 307.822,
      "eval_steps_per_second": 38.786,
      "step": 2925
    },
    {
      "epoch": 10.0,
      "learning_rate": 4.642857142857143e-05,
      "loss": 0.199,
      "step": 3250
    },
    {
      "epoch": 10.0,
      "eval_loss": 0.21139533817768097,
      "eval_runtime": 1.624,
      "eval_samples_per_second": 307.888,
      "eval_steps_per_second": 38.794,
      "step": 3250
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.591269841269841e-05,
      "loss": 0.1856,
      "step": 3575
    },
    {
      "epoch": 11.0,
      "eval_loss": 0.21221421658992767,
      "eval_runtime": 1.6173,
      "eval_samples_per_second": 309.16,
      "eval_steps_per_second": 38.954,
      "step": 3575
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.5396825396825395e-05,
      "loss": 0.1751,
      "step": 3900
    },
    {
      "epoch": 12.0,
      "eval_loss": 0.20572426915168762,
      "eval_runtime": 1.6208,
      "eval_samples_per_second": 308.486,
      "eval_steps_per_second": 38.869,
      "step": 3900
    },
    {
      "epoch": 13.0,
      "learning_rate": 4.4880952380952385e-05,
      "loss": 0.1667,
      "step": 4225
    },
    {
      "epoch": 13.0,
      "eval_loss": 0.20298737287521362,
      "eval_runtime": 1.6143,
      "eval_samples_per_second": 309.733,
      "eval_steps_per_second": 39.026,
      "step": 4225
    },
    {
      "epoch": 14.0,
      "learning_rate": 4.436507936507937e-05,
      "loss": 0.1592,
      "step": 4550
    },
    {
      "epoch": 14.0,
      "eval_loss": 0.20216256380081177,
      "eval_runtime": 1.6176,
      "eval_samples_per_second": 309.097,
      "eval_steps_per_second": 38.946,
      "step": 4550
    },
    {
      "epoch": 15.0,
      "learning_rate": 4.384920634920635e-05,
      "loss": 0.1528,
      "step": 4875
    },
    {
      "epoch": 15.0,
      "eval_loss": 0.19866463541984558,
      "eval_runtime": 1.5537,
      "eval_samples_per_second": 321.813,
      "eval_steps_per_second": 40.548,
      "step": 4875
    },
    {
      "epoch": 16.0,
      "learning_rate": 4.3333333333333334e-05,
      "loss": 0.1475,
      "step": 5200
    },
    {
      "epoch": 16.0,
      "eval_loss": 0.194817915558815,
      "eval_runtime": 1.6315,
      "eval_samples_per_second": 306.466,
      "eval_steps_per_second": 38.615,
      "step": 5200
    },
    {
      "epoch": 17.0,
      "learning_rate": 4.282063492063492e-05,
      "loss": 0.1435,
      "step": 5525
    },
    {
      "epoch": 17.0,
      "eval_loss": 0.20059792697429657,
      "eval_runtime": 1.6283,
      "eval_samples_per_second": 307.061,
      "eval_steps_per_second": 38.69,
      "step": 5525
    },
    {
      "epoch": 18.0,
      "learning_rate": 4.230476190476191e-05,
      "loss": 0.1394,
      "step": 5850
    },
    {
      "epoch": 18.0,
      "eval_loss": 0.19283074140548706,
      "eval_runtime": 1.6289,
      "eval_samples_per_second": 306.96,
      "eval_steps_per_second": 38.677,
      "step": 5850
    },
    {
      "epoch": 19.0,
      "learning_rate": 4.178888888888889e-05,
      "loss": 0.1353,
      "step": 6175
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.19786441326141357,
      "eval_runtime": 1.6304,
      "eval_samples_per_second": 306.678,
      "eval_steps_per_second": 38.641,
      "step": 6175
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.1273015873015876e-05,
      "loss": 0.132,
      "step": 6500
    },
    {
      "epoch": 20.0,
      "eval_loss": 0.1935906708240509,
      "eval_runtime": 1.6121,
      "eval_samples_per_second": 310.148,
      "eval_steps_per_second": 39.079,
      "step": 6500
    },
    {
      "epoch": 21.0,
      "learning_rate": 4.075714285714286e-05,
      "loss": 0.1286,
      "step": 6825
    },
    {
      "epoch": 21.0,
      "eval_loss": 0.19877703487873077,
      "eval_runtime": 1.616,
      "eval_samples_per_second": 309.413,
      "eval_steps_per_second": 38.986,
      "step": 6825
    },
    {
      "epoch": 22.0,
      "learning_rate": 4.024126984126984e-05,
      "loss": 0.126,
      "step": 7150
    },
    {
      "epoch": 22.0,
      "eval_loss": 0.19710922241210938,
      "eval_runtime": 1.6082,
      "eval_samples_per_second": 310.904,
      "eval_steps_per_second": 39.174,
      "step": 7150
    },
    {
      "epoch": 23.0,
      "learning_rate": 3.9725396825396825e-05,
      "loss": 0.1235,
      "step": 7475
    },
    {
      "epoch": 23.0,
      "eval_loss": 0.19361434876918793,
      "eval_runtime": 1.6189,
      "eval_samples_per_second": 308.861,
      "eval_steps_per_second": 38.916,
      "step": 7475
    },
    {
      "epoch": 24.0,
      "learning_rate": 3.921111111111112e-05,
      "loss": 0.1222,
      "step": 7800
    },
    {
      "epoch": 24.0,
      "eval_loss": 0.19744054973125458,
      "eval_runtime": 1.6204,
      "eval_samples_per_second": 308.557,
      "eval_steps_per_second": 38.878,
      "step": 7800
    },
    {
      "epoch": 25.0,
      "learning_rate": 3.869523809523809e-05,
      "loss": 0.1197,
      "step": 8125
    },
    {
      "epoch": 25.0,
      "eval_loss": 0.1937996745109558,
      "eval_runtime": 1.6179,
      "eval_samples_per_second": 309.05,
      "eval_steps_per_second": 38.94,
      "step": 8125
    },
    {
      "epoch": 26.0,
      "learning_rate": 3.817936507936508e-05,
      "loss": 0.1178,
      "step": 8450
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.195204496383667,
      "eval_runtime": 1.6285,
      "eval_samples_per_second": 307.033,
      "eval_steps_per_second": 38.686,
      "step": 8450
    },
    {
      "epoch": 27.0,
      "learning_rate": 3.7663492063492066e-05,
      "loss": 0.1158,
      "step": 8775
    },
    {
      "epoch": 27.0,
      "eval_loss": 0.1947883665561676,
      "eval_runtime": 1.631,
      "eval_samples_per_second": 306.561,
      "eval_steps_per_second": 38.627,
      "step": 8775
    },
    {
      "epoch": 28.0,
      "learning_rate": 3.714761904761905e-05,
      "loss": 0.1146,
      "step": 9100
    },
    {
      "epoch": 28.0,
      "eval_loss": 0.19417957961559296,
      "eval_runtime": 1.6321,
      "eval_samples_per_second": 306.362,
      "eval_steps_per_second": 38.602,
      "step": 9100
    },
    {
      "epoch": 29.0,
      "learning_rate": 3.663174603174603e-05,
      "loss": 0.1126,
      "step": 9425
    },
    {
      "epoch": 29.0,
      "eval_loss": 0.19478720426559448,
      "eval_runtime": 1.6326,
      "eval_samples_per_second": 306.262,
      "eval_steps_per_second": 38.589,
      "step": 9425
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.611587301587302e-05,
      "loss": 0.1113,
      "step": 9750
    },
    {
      "epoch": 30.0,
      "eval_loss": 0.1987549215555191,
      "eval_runtime": 1.6326,
      "eval_samples_per_second": 306.255,
      "eval_steps_per_second": 38.588,
      "step": 9750
    },
    {
      "epoch": 31.0,
      "learning_rate": 3.56e-05,
      "loss": 0.1103,
      "step": 10075
    },
    {
      "epoch": 31.0,
      "eval_loss": 0.19776514172554016,
      "eval_runtime": 1.6335,
      "eval_samples_per_second": 306.099,
      "eval_steps_per_second": 38.568,
      "step": 10075
    },
    {
      "epoch": 32.0,
      "learning_rate": 3.508412698412699e-05,
      "loss": 0.1091,
      "step": 10400
    },
    {
      "epoch": 32.0,
      "eval_loss": 0.19811394810676575,
      "eval_runtime": 1.6237,
      "eval_samples_per_second": 307.933,
      "eval_steps_per_second": 38.8,
      "step": 10400
    },
    {
      "epoch": 33.0,
      "learning_rate": 3.456825396825397e-05,
      "loss": 0.108,
      "step": 10725
    },
    {
      "epoch": 33.0,
      "eval_loss": 0.1984662413597107,
      "eval_runtime": 1.6185,
      "eval_samples_per_second": 308.924,
      "eval_steps_per_second": 38.924,
      "step": 10725
    },
    {
      "epoch": 34.0,
      "learning_rate": 3.4052380952380954e-05,
      "loss": 0.1071,
      "step": 11050
    },
    {
      "epoch": 34.0,
      "eval_loss": 0.19538824260234833,
      "eval_runtime": 1.6308,
      "eval_samples_per_second": 306.593,
      "eval_steps_per_second": 38.631,
      "step": 11050
    },
    {
      "epoch": 35.0,
      "learning_rate": 3.353650793650794e-05,
      "loss": 0.1058,
      "step": 11375
    },
    {
      "epoch": 35.0,
      "eval_loss": 0.19716860353946686,
      "eval_runtime": 1.6308,
      "eval_samples_per_second": 306.593,
      "eval_steps_per_second": 38.631,
      "step": 11375
    },
    {
      "epoch": 36.0,
      "learning_rate": 3.3020634920634926e-05,
      "loss": 0.1046,
      "step": 11700
    },
    {
      "epoch": 36.0,
      "eval_loss": 0.19588053226470947,
      "eval_runtime": 1.6331,
      "eval_samples_per_second": 306.174,
      "eval_steps_per_second": 38.578,
      "step": 11700
    },
    {
      "epoch": 37.0,
      "learning_rate": 3.25047619047619e-05,
      "loss": 0.1039,
      "step": 12025
    },
    {
      "epoch": 37.0,
      "eval_loss": 0.19945307075977325,
      "eval_runtime": 1.6342,
      "eval_samples_per_second": 305.961,
      "eval_steps_per_second": 38.551,
      "step": 12025
    },
    {
      "epoch": 38.0,
      "learning_rate": 3.198888888888889e-05,
      "loss": 0.1033,
      "step": 12350
    },
    {
      "epoch": 38.0,
      "eval_loss": 0.19728580117225647,
      "eval_runtime": 1.6182,
      "eval_samples_per_second": 308.98,
      "eval_steps_per_second": 38.931,
      "step": 12350
    },
    {
      "epoch": 39.0,
      "learning_rate": 3.1473015873015875e-05,
      "loss": 0.1021,
      "step": 12675
    },
    {
      "epoch": 39.0,
      "eval_loss": 0.19883698225021362,
      "eval_runtime": 1.6098,
      "eval_samples_per_second": 310.588,
      "eval_steps_per_second": 39.134,
      "step": 12675
    },
    {
      "epoch": 40.0,
      "learning_rate": 3.095714285714286e-05,
      "loss": 0.1014,
      "step": 13000
    },
    {
      "epoch": 40.0,
      "eval_loss": 0.19719742238521576,
      "eval_runtime": 1.6198,
      "eval_samples_per_second": 308.678,
      "eval_steps_per_second": 38.893,
      "step": 13000
    },
    {
      "epoch": 41.0,
      "learning_rate": 3.044126984126984e-05,
      "loss": 0.101,
      "step": 13325
    },
    {
      "epoch": 41.0,
      "eval_loss": 0.1962202489376068,
      "eval_runtime": 1.6168,
      "eval_samples_per_second": 309.257,
      "eval_steps_per_second": 38.966,
      "step": 13325
    },
    {
      "epoch": 42.0,
      "learning_rate": 2.9925396825396828e-05,
      "loss": 0.1001,
      "step": 13650
    },
    {
      "epoch": 42.0,
      "eval_loss": 0.19871322810649872,
      "eval_runtime": 1.6213,
      "eval_samples_per_second": 308.398,
      "eval_steps_per_second": 38.858,
      "step": 13650
    },
    {
      "epoch": 43.0,
      "learning_rate": 2.9409523809523807e-05,
      "loss": 0.0993,
      "step": 13975
    },
    {
      "epoch": 43.0,
      "eval_loss": 0.20118726789951324,
      "eval_runtime": 1.6162,
      "eval_samples_per_second": 309.364,
      "eval_steps_per_second": 38.98,
      "step": 13975
    },
    {
      "epoch": 44.0,
      "learning_rate": 2.8893650793650794e-05,
      "loss": 0.0989,
      "step": 14300
    },
    {
      "epoch": 44.0,
      "eval_loss": 0.20416098833084106,
      "eval_runtime": 1.615,
      "eval_samples_per_second": 309.592,
      "eval_steps_per_second": 39.009,
      "step": 14300
    },
    {
      "epoch": 45.0,
      "learning_rate": 2.837777777777778e-05,
      "loss": 0.0981,
      "step": 14625
    },
    {
      "epoch": 45.0,
      "eval_loss": 0.20017844438552856,
      "eval_runtime": 1.6211,
      "eval_samples_per_second": 308.434,
      "eval_steps_per_second": 38.863,
      "step": 14625
    },
    {
      "epoch": 46.0,
      "learning_rate": 2.786190476190476e-05,
      "loss": 0.0977,
      "step": 14950
    },
    {
      "epoch": 46.0,
      "eval_loss": 0.1984575241804123,
      "eval_runtime": 1.6178,
      "eval_samples_per_second": 309.053,
      "eval_steps_per_second": 38.941,
      "step": 14950
    },
    {
      "epoch": 47.0,
      "learning_rate": 2.7346031746031746e-05,
      "loss": 0.0972,
      "step": 15275
    },
    {
      "epoch": 47.0,
      "eval_loss": 0.1984962373971939,
      "eval_runtime": 1.6179,
      "eval_samples_per_second": 309.052,
      "eval_steps_per_second": 38.941,
      "step": 15275
    },
    {
      "epoch": 48.0,
      "learning_rate": 2.6830158730158732e-05,
      "loss": 0.0967,
      "step": 15600
    },
    {
      "epoch": 48.0,
      "eval_loss": 0.20049268007278442,
      "eval_runtime": 1.6168,
      "eval_samples_per_second": 309.261,
      "eval_steps_per_second": 38.967,
      "step": 15600
    },
    {
      "epoch": 49.0,
      "learning_rate": 2.6315873015873017e-05,
      "loss": 0.0961,
      "step": 15925
    },
    {
      "epoch": 49.0,
      "eval_loss": 0.20437875390052795,
      "eval_runtime": 1.6219,
      "eval_samples_per_second": 308.282,
      "eval_steps_per_second": 38.843,
      "step": 15925
    },
    {
      "epoch": 50.0,
      "learning_rate": 2.58e-05,
      "loss": 0.0954,
      "step": 16250
    },
    {
      "epoch": 50.0,
      "eval_loss": 0.20537793636322021,
      "eval_runtime": 1.615,
      "eval_samples_per_second": 309.596,
      "eval_steps_per_second": 39.009,
      "step": 16250
    },
    {
      "epoch": 51.0,
      "learning_rate": 2.5284126984126987e-05,
      "loss": 0.095,
      "step": 16575
    },
    {
      "epoch": 51.0,
      "eval_loss": 0.20233562588691711,
      "eval_runtime": 1.6189,
      "eval_samples_per_second": 308.861,
      "eval_steps_per_second": 38.916,
      "step": 16575
    },
    {
      "epoch": 52.0,
      "learning_rate": 2.476825396825397e-05,
      "loss": 0.0946,
      "step": 16900
    },
    {
      "epoch": 52.0,
      "eval_loss": 0.2065245360136032,
      "eval_runtime": 1.6223,
      "eval_samples_per_second": 308.199,
      "eval_steps_per_second": 38.833,
      "step": 16900
    },
    {
      "epoch": 53.0,
      "learning_rate": 2.4253968253968255e-05,
      "loss": 0.0943,
      "step": 17225
    },
    {
      "epoch": 53.0,
      "eval_loss": 0.2037389576435089,
      "eval_runtime": 1.6223,
      "eval_samples_per_second": 308.208,
      "eval_steps_per_second": 38.834,
      "step": 17225
    },
    {
      "epoch": 54.0,
      "learning_rate": 2.373809523809524e-05,
      "loss": 0.0938,
      "step": 17550
    },
    {
      "epoch": 54.0,
      "eval_loss": 0.20668557286262512,
      "eval_runtime": 1.6254,
      "eval_samples_per_second": 307.609,
      "eval_steps_per_second": 38.759,
      "step": 17550
    },
    {
      "epoch": 55.0,
      "learning_rate": 2.3222222222222224e-05,
      "loss": 0.0935,
      "step": 17875
    },
    {
      "epoch": 55.0,
      "eval_loss": 0.2060803920030594,
      "eval_runtime": 1.6215,
      "eval_samples_per_second": 308.359,
      "eval_steps_per_second": 38.853,
      "step": 17875
    },
    {
      "epoch": 56.0,
      "learning_rate": 2.2706349206349207e-05,
      "loss": 0.0932,
      "step": 18200
    },
    {
      "epoch": 56.0,
      "eval_loss": 0.20473302900791168,
      "eval_runtime": 1.6144,
      "eval_samples_per_second": 309.707,
      "eval_steps_per_second": 39.023,
      "step": 18200
    },
    {
      "epoch": 57.0,
      "learning_rate": 2.2192063492063492e-05,
      "loss": 0.093,
      "step": 18525
    },
    {
      "epoch": 57.0,
      "eval_loss": 0.2071973830461502,
      "eval_runtime": 1.657,
      "eval_samples_per_second": 301.749,
      "eval_steps_per_second": 38.02,
      "step": 18525
    },
    {
      "epoch": 58.0,
      "learning_rate": 2.1676190476190478e-05,
      "loss": 0.0923,
      "step": 18850
    },
    {
      "epoch": 58.0,
      "eval_loss": 0.20672236382961273,
      "eval_runtime": 1.619,
      "eval_samples_per_second": 308.841,
      "eval_steps_per_second": 38.914,
      "step": 18850
    },
    {
      "epoch": 59.0,
      "learning_rate": 2.116031746031746e-05,
      "loss": 0.092,
      "step": 19175
    },
    {
      "epoch": 59.0,
      "eval_loss": 0.20423702895641327,
      "eval_runtime": 1.6318,
      "eval_samples_per_second": 306.408,
      "eval_steps_per_second": 38.607,
      "step": 19175
    },
    {
      "epoch": 60.0,
      "learning_rate": 2.0644444444444447e-05,
      "loss": 0.0916,
      "step": 19500
    },
    {
      "epoch": 60.0,
      "eval_loss": 0.20601221919059753,
      "eval_runtime": 1.6198,
      "eval_samples_per_second": 308.675,
      "eval_steps_per_second": 38.893,
      "step": 19500
    },
    {
      "epoch": 61.0,
      "learning_rate": 2.012857142857143e-05,
      "loss": 0.0913,
      "step": 19825
    },
    {
      "epoch": 61.0,
      "eval_loss": 0.20910584926605225,
      "eval_runtime": 1.6185,
      "eval_samples_per_second": 308.926,
      "eval_steps_per_second": 38.925,
      "step": 19825
    },
    {
      "epoch": 62.0,
      "learning_rate": 1.9612698412698413e-05,
      "loss": 0.091,
      "step": 20150
    },
    {
      "epoch": 62.0,
      "eval_loss": 0.20686961710453033,
      "eval_runtime": 1.6312,
      "eval_samples_per_second": 306.52,
      "eval_steps_per_second": 38.621,
      "step": 20150
    },
    {
      "epoch": 63.0,
      "learning_rate": 1.90968253968254e-05,
      "loss": 0.0907,
      "step": 20475
    },
    {
      "epoch": 63.0,
      "eval_loss": 0.20606577396392822,
      "eval_runtime": 1.6309,
      "eval_samples_per_second": 306.575,
      "eval_steps_per_second": 38.628,
      "step": 20475
    },
    {
      "epoch": 64.0,
      "learning_rate": 1.8580952380952383e-05,
      "loss": 0.0905,
      "step": 20800
    },
    {
      "epoch": 64.0,
      "eval_loss": 0.20628054440021515,
      "eval_runtime": 1.6321,
      "eval_samples_per_second": 306.359,
      "eval_steps_per_second": 38.601,
      "step": 20800
    },
    {
      "epoch": 65.0,
      "learning_rate": 1.8065079365079366e-05,
      "loss": 0.09,
      "step": 21125
    },
    {
      "epoch": 65.0,
      "eval_loss": 0.2085714489221573,
      "eval_runtime": 1.615,
      "eval_samples_per_second": 309.598,
      "eval_steps_per_second": 39.009,
      "step": 21125
    },
    {
      "epoch": 66.0,
      "learning_rate": 1.7549206349206352e-05,
      "loss": 0.0899,
      "step": 21450
    },
    {
      "epoch": 66.0,
      "eval_loss": 0.20817407965660095,
      "eval_runtime": 1.6322,
      "eval_samples_per_second": 306.34,
      "eval_steps_per_second": 38.599,
      "step": 21450
    },
    {
      "epoch": 67.0,
      "learning_rate": 1.7033333333333335e-05,
      "loss": 0.0897,
      "step": 21775
    },
    {
      "epoch": 67.0,
      "eval_loss": 0.20880760252475739,
      "eval_runtime": 1.6231,
      "eval_samples_per_second": 308.059,
      "eval_steps_per_second": 38.815,
      "step": 21775
    },
    {
      "epoch": 68.0,
      "learning_rate": 1.6517460317460318e-05,
      "loss": 0.0892,
      "step": 22100
    },
    {
      "epoch": 68.0,
      "eval_loss": 0.20673272013664246,
      "eval_runtime": 1.6194,
      "eval_samples_per_second": 308.75,
      "eval_steps_per_second": 38.902,
      "step": 22100
    },
    {
      "epoch": 69.0,
      "learning_rate": 1.6001587301587305e-05,
      "loss": 0.0891,
      "step": 22425
    },
    {
      "epoch": 69.0,
      "eval_loss": 0.20683825016021729,
      "eval_runtime": 1.5781,
      "eval_samples_per_second": 316.838,
      "eval_steps_per_second": 39.922,
      "step": 22425
    },
    {
      "epoch": 70.0,
      "learning_rate": 1.5485714285714287e-05,
      "loss": 0.0888,
      "step": 22750
    },
    {
      "epoch": 70.0,
      "eval_loss": 0.20834441483020782,
      "eval_runtime": 1.6043,
      "eval_samples_per_second": 311.654,
      "eval_steps_per_second": 39.268,
      "step": 22750
    },
    {
      "epoch": 71.0,
      "learning_rate": 1.496984126984127e-05,
      "loss": 0.0886,
      "step": 23075
    },
    {
      "epoch": 71.0,
      "eval_loss": 0.20795977115631104,
      "eval_runtime": 1.5947,
      "eval_samples_per_second": 313.545,
      "eval_steps_per_second": 39.507,
      "step": 23075
    },
    {
      "epoch": 72.0,
      "learning_rate": 1.4453968253968255e-05,
      "loss": 0.0884,
      "step": 23400
    },
    {
      "epoch": 72.0,
      "eval_loss": 0.20644958317279816,
      "eval_runtime": 1.6293,
      "eval_samples_per_second": 306.871,
      "eval_steps_per_second": 38.666,
      "step": 23400
    },
    {
      "epoch": 73.0,
      "learning_rate": 1.3938095238095238e-05,
      "loss": 0.0881,
      "step": 23725
    },
    {
      "epoch": 73.0,
      "eval_loss": 0.20805425941944122,
      "eval_runtime": 1.6284,
      "eval_samples_per_second": 307.046,
      "eval_steps_per_second": 38.688,
      "step": 23725
    },
    {
      "epoch": 74.0,
      "learning_rate": 1.3422222222222223e-05,
      "loss": 0.0878,
      "step": 24050
    },
    {
      "epoch": 74.0,
      "eval_loss": 0.20709100365638733,
      "eval_runtime": 1.6294,
      "eval_samples_per_second": 306.866,
      "eval_steps_per_second": 38.665,
      "step": 24050
    },
    {
      "epoch": 75.0,
      "learning_rate": 1.2906349206349208e-05,
      "loss": 0.0875,
      "step": 24375
    },
    {
      "epoch": 75.0,
      "eval_loss": 0.2094418704509735,
      "eval_runtime": 1.6183,
      "eval_samples_per_second": 308.957,
      "eval_steps_per_second": 38.929,
      "step": 24375
    },
    {
      "epoch": 76.0,
      "learning_rate": 1.239047619047619e-05,
      "loss": 0.0875,
      "step": 24700
    },
    {
      "epoch": 76.0,
      "eval_loss": 0.2107103168964386,
      "eval_runtime": 1.628,
      "eval_samples_per_second": 307.132,
      "eval_steps_per_second": 38.699,
      "step": 24700
    },
    {
      "epoch": 77.0,
      "learning_rate": 1.1874603174603175e-05,
      "loss": 0.0871,
      "step": 25025
    },
    {
      "epoch": 77.0,
      "eval_loss": 0.21094562113285065,
      "eval_runtime": 1.6329,
      "eval_samples_per_second": 306.207,
      "eval_steps_per_second": 38.582,
      "step": 25025
    },
    {
      "epoch": 78.0,
      "learning_rate": 1.1358730158730158e-05,
      "loss": 0.087,
      "step": 25350
    },
    {
      "epoch": 78.0,
      "eval_loss": 0.21239960193634033,
      "eval_runtime": 1.6224,
      "eval_samples_per_second": 308.182,
      "eval_steps_per_second": 38.831,
      "step": 25350
    },
    {
      "epoch": 79.0,
      "learning_rate": 1.0842857142857143e-05,
      "loss": 0.0868,
      "step": 25675
    },
    {
      "epoch": 79.0,
      "eval_loss": 0.21309518814086914,
      "eval_runtime": 1.6334,
      "eval_samples_per_second": 306.113,
      "eval_steps_per_second": 38.57,
      "step": 25675
    },
    {
      "epoch": 80.0,
      "learning_rate": 1.0326984126984128e-05,
      "loss": 0.0867,
      "step": 26000
    },
    {
      "epoch": 80.0,
      "eval_loss": 0.21296508610248566,
      "eval_runtime": 1.6313,
      "eval_samples_per_second": 306.503,
      "eval_steps_per_second": 38.619,
      "step": 26000
    },
    {
      "epoch": 81.0,
      "learning_rate": 9.81111111111111e-06,
      "loss": 0.0864,
      "step": 26325
    },
    {
      "epoch": 81.0,
      "eval_loss": 0.2111913114786148,
      "eval_runtime": 1.63,
      "eval_samples_per_second": 306.746,
      "eval_steps_per_second": 38.65,
      "step": 26325
    },
    {
      "epoch": 82.0,
      "learning_rate": 9.295238095238095e-06,
      "loss": 0.0863,
      "step": 26650
    },
    {
      "epoch": 82.0,
      "eval_loss": 0.21297475695610046,
      "eval_runtime": 1.6157,
      "eval_samples_per_second": 309.46,
      "eval_steps_per_second": 38.992,
      "step": 26650
    },
    {
      "epoch": 83.0,
      "learning_rate": 8.780952380952382e-06,
      "loss": 0.086,
      "step": 26975
    },
    {
      "epoch": 83.0,
      "eval_loss": 0.21185711026191711,
      "eval_runtime": 1.6153,
      "eval_samples_per_second": 309.538,
      "eval_steps_per_second": 39.002,
      "step": 26975
    },
    {
      "epoch": 84.0,
      "learning_rate": 8.265079365079366e-06,
      "loss": 0.0858,
      "step": 27300
    },
    {
      "epoch": 84.0,
      "eval_loss": 0.2126666158437729,
      "eval_runtime": 1.6047,
      "eval_samples_per_second": 311.576,
      "eval_steps_per_second": 39.259,
      "step": 27300
    },
    {
      "epoch": 85.0,
      "learning_rate": 7.74920634920635e-06,
      "loss": 0.0856,
      "step": 27625
    },
    {
      "epoch": 85.0,
      "eval_loss": 0.21050776541233063,
      "eval_runtime": 1.6118,
      "eval_samples_per_second": 310.205,
      "eval_steps_per_second": 39.086,
      "step": 27625
    },
    {
      "epoch": 86.0,
      "learning_rate": 7.233333333333333e-06,
      "loss": 0.0854,
      "step": 27950
    },
    {
      "epoch": 86.0,
      "eval_loss": 0.21171194314956665,
      "eval_runtime": 1.6187,
      "eval_samples_per_second": 308.884,
      "eval_steps_per_second": 38.919,
      "step": 27950
    },
    {
      "epoch": 87.0,
      "learning_rate": 6.717460317460318e-06,
      "loss": 0.0853,
      "step": 28275
    },
    {
      "epoch": 87.0,
      "eval_loss": 0.21184836328029633,
      "eval_runtime": 1.6069,
      "eval_samples_per_second": 311.153,
      "eval_steps_per_second": 39.205,
      "step": 28275
    },
    {
      "epoch": 88.0,
      "learning_rate": 6.201587301587302e-06,
      "loss": 0.085,
      "step": 28600
    },
    {
      "epoch": 88.0,
      "eval_loss": 0.21248288452625275,
      "eval_runtime": 1.6139,
      "eval_samples_per_second": 309.805,
      "eval_steps_per_second": 39.035,
      "step": 28600
    },
    {
      "epoch": 89.0,
      "learning_rate": 5.685714285714286e-06,
      "loss": 0.0848,
      "step": 28925
    },
    {
      "epoch": 89.0,
      "eval_loss": 0.21106228232383728,
      "eval_runtime": 1.631,
      "eval_samples_per_second": 306.568,
      "eval_steps_per_second": 38.628,
      "step": 28925
    },
    {
      "epoch": 90.0,
      "learning_rate": 5.16984126984127e-06,
      "loss": 0.0847,
      "step": 29250
    },
    {
      "epoch": 90.0,
      "eval_loss": 0.21153707802295685,
      "eval_runtime": 1.6322,
      "eval_samples_per_second": 306.329,
      "eval_steps_per_second": 38.598,
      "step": 29250
    },
    {
      "epoch": 91.0,
      "learning_rate": 4.655555555555556e-06,
      "loss": 0.0845,
      "step": 29575
    },
    {
      "epoch": 91.0,
      "eval_loss": 0.2115468680858612,
      "eval_runtime": 1.6278,
      "eval_samples_per_second": 307.165,
      "eval_steps_per_second": 38.703,
      "step": 29575
    },
    {
      "epoch": 92.0,
      "learning_rate": 4.13968253968254e-06,
      "loss": 0.0844,
      "step": 29900
    },
    {
      "epoch": 92.0,
      "eval_loss": 0.21205155551433563,
      "eval_runtime": 1.6299,
      "eval_samples_per_second": 306.762,
      "eval_steps_per_second": 38.652,
      "step": 29900
    },
    {
      "epoch": 93.0,
      "learning_rate": 3.6238095238095236e-06,
      "loss": 0.0842,
      "step": 30225
    },
    {
      "epoch": 93.0,
      "eval_loss": 0.21224457025527954,
      "eval_runtime": 1.6302,
      "eval_samples_per_second": 306.709,
      "eval_steps_per_second": 38.645,
      "step": 30225
    },
    {
      "epoch": 94.0,
      "learning_rate": 3.107936507936508e-06,
      "loss": 0.0839,
      "step": 30550
    },
    {
      "epoch": 94.0,
      "eval_loss": 0.21223796904087067,
      "eval_runtime": 1.6205,
      "eval_samples_per_second": 308.552,
      "eval_steps_per_second": 38.878,
      "step": 30550
    },
    {
      "epoch": 95.0,
      "learning_rate": 2.592063492063492e-06,
      "loss": 0.0839,
      "step": 30875
    },
    {
      "epoch": 95.0,
      "eval_loss": 0.2121766060590744,
      "eval_runtime": 1.626,
      "eval_samples_per_second": 307.496,
      "eval_steps_per_second": 38.745,
      "step": 30875
    },
    {
      "epoch": 96.0,
      "learning_rate": 2.0761904761904764e-06,
      "loss": 0.0836,
      "step": 31200
    },
    {
      "epoch": 96.0,
      "eval_loss": 0.21236176788806915,
      "eval_runtime": 1.6277,
      "eval_samples_per_second": 307.178,
      "eval_steps_per_second": 38.704,
      "step": 31200
    },
    {
      "epoch": 97.0,
      "learning_rate": 1.561904761904762e-06,
      "loss": 0.0836,
      "step": 31525
    },
    {
      "epoch": 97.0,
      "eval_loss": 0.21288882195949554,
      "eval_runtime": 1.6293,
      "eval_samples_per_second": 306.882,
      "eval_steps_per_second": 38.667,
      "step": 31525
    },
    {
      "epoch": 98.0,
      "learning_rate": 1.046031746031746e-06,
      "loss": 0.0834,
      "step": 31850
    },
    {
      "epoch": 98.0,
      "eval_loss": 0.21266008913516998,
      "eval_runtime": 1.6301,
      "eval_samples_per_second": 306.725,
      "eval_steps_per_second": 38.647,
      "step": 31850
    },
    {
      "epoch": 99.0,
      "learning_rate": 5.301587301587302e-07,
      "loss": 0.0833,
      "step": 32175
    },
    {
      "epoch": 99.0,
      "eval_loss": 0.2126527726650238,
      "eval_runtime": 1.6305,
      "eval_samples_per_second": 306.648,
      "eval_steps_per_second": 38.638,
      "step": 32175
    },
    {
      "epoch": 100.0,
      "learning_rate": 1.4285714285714288e-08,
      "loss": 0.0831,
      "step": 32500
    },
    {
      "epoch": 100.0,
      "eval_loss": 0.21250146627426147,
      "eval_runtime": 1.6061,
      "eval_samples_per_second": 311.323,
      "eval_steps_per_second": 39.227,
      "step": 32500
    }
  ],
  "max_steps": 32500,
  "num_train_epochs": 100,
  "total_flos": 3.044459658533238e+17,
  "trial_name": null,
  "trial_params": null
}
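
This file has the shape of the trainer_state.json written by the Hugging Face transformers Trainer: log_history interleaves per-epoch training entries (loss, learning_rate, step) with evaluation entries (eval_loss and throughput). A minimal sketch for inspecting it with the Python standard library, assuming the file is saved locally as trainer_state.json (the path is illustrative):

```python
import json

# Assumed local path; point this at wherever the trainer_state.json above lives.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss" instead.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Evaluation checkpoint with the lowest eval_loss over the run.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at epoch {best['epoch']:.0f} (step {best['step']})")

# Final training loss for comparison.
last = train_logs[-1]
print(f"final training loss {last['loss']:.4f} at step {last['step']}")
```

In this log the best eval_loss occurs around epoch 18 (~0.193) while the training loss keeps falling to 0.0831 at step 32500, so the later epochs mainly overfit by that metric.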