{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.9939686369119425,
  "eval_steps": 500,
  "global_step": 1035,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 4.807692307692308e-07,
      "loss": 1.4009,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 2.403846153846154e-06,
      "loss": 1.2871,
      "step": 5
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.807692307692308e-06,
      "loss": 1.1951,
      "step": 10
    },
    {
      "epoch": 0.07,
      "learning_rate": 7.211538461538461e-06,
      "loss": 1.1289,
      "step": 15
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.615384615384616e-06,
      "loss": 1.1274,
      "step": 20
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.2019230769230771e-05,
      "loss": 1.0066,
      "step": 25
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.4423076923076923e-05,
      "loss": 0.9923,
      "step": 30
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.682692307692308e-05,
      "loss": 1.0136,
      "step": 35
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.923076923076923e-05,
      "loss": 1.0153,
      "step": 40
    },
    {
      "epoch": 0.22,
      "learning_rate": 2.1634615384615387e-05,
      "loss": 0.9874,
      "step": 45
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.4038461538461542e-05,
      "loss": 0.9887,
      "step": 50
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.6442307692307694e-05,
      "loss": 0.9289,
      "step": 55
    },
    {
      "epoch": 0.29,
      "learning_rate": 2.8846153846153845e-05,
      "loss": 0.9279,
      "step": 60
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.125e-05,
      "loss": 0.9228,
      "step": 65
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.365384615384616e-05,
      "loss": 0.9526,
      "step": 70
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.605769230769231e-05,
      "loss": 0.9023,
      "step": 75
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.9244,
      "step": 80
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.0865384615384615e-05,
      "loss": 0.885,
      "step": 85
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.326923076923077e-05,
      "loss": 0.917,
      "step": 90
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.5673076923076925e-05,
      "loss": 0.9262,
      "step": 95
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.8076923076923084e-05,
      "loss": 0.8931,
      "step": 100
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.999985766556415e-05,
      "loss": 0.8588,
      "step": 105
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.999487613048386e-05,
      "loss": 0.8868,
      "step": 110
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.998277949425564e-05,
      "loss": 0.8861,
      "step": 115
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.9963571200336876e-05,
      "loss": 0.8955,
      "step": 120
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.9937256716606394e-05,
      "loss": 0.8693,
      "step": 125
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.9903843533808e-05,
      "loss": 0.8645,
      "step": 130
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.986334116341812e-05,
      "loss": 0.8622,
      "step": 135
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.981576113493827e-05,
      "loss": 0.8166,
      "step": 140
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.976111699261301e-05,
      "loss": 0.8387,
      "step": 145
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.9699424291574436e-05,
      "loss": 0.8268,
      "step": 150
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.9630700593414206e-05,
      "loss": 0.8282,
      "step": 155
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.955496546118439e-05,
      "loss": 0.8482,
      "step": 160
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.947224045382866e-05,
      "loss": 0.8075,
      "step": 165
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.938254912004522e-05,
      "loss": 0.8612,
      "step": 170
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.92859169915834e-05,
      "loss": 0.8528,
      "step": 175
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.9182371575975736e-05,
      "loss": 0.8368,
      "step": 180
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.907194234870759e-05,
      "loss": 0.8273,
      "step": 185
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.8954660744826575e-05,
      "loss": 0.8127,
      "step": 190
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.883056014999423e-05,
      "loss": 0.8429,
      "step": 195
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.8699675890982345e-05,
      "loss": 0.8408,
      "step": 200
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.8562045225616736e-05,
      "loss": 0.8226,
      "step": 205
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.8232178688049316,
      "eval_runtime": 9.8377,
      "eval_samples_per_second": 23.583,
      "eval_steps_per_second": 5.896,
      "step": 207
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.8417707332171385e-05,
      "loss": 0.7622,
      "step": 210
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.826670329821584e-05,
      "loss": 0.6629,
      "step": 215
    },
    {
      "epoch": 1.06,
      "learning_rate": 4.810907610891914e-05,
      "loss": 0.6781,
      "step": 220
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.7944870634813525e-05,
      "loss": 0.6815,
      "step": 225
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.7774133619021514e-05,
      "loss": 0.6885,
      "step": 230
    },
    {
      "epoch": 1.13,
      "learning_rate": 4.759691366394989e-05,
      "loss": 0.6674,
      "step": 235
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.741326121745441e-05,
      "loss": 0.7042,
      "step": 240
    },
    {
      "epoch": 1.18,
      "learning_rate": 4.722322855847921e-05,
      "loss": 0.6955,
      "step": 245
    },
    {
      "epoch": 1.21,
      "learning_rate": 4.702686978217495e-05,
      "loss": 0.6949,
      "step": 250
    },
    {
      "epoch": 1.23,
      "learning_rate": 4.682424078449995e-05,
      "loss": 0.6823,
      "step": 255
    },
    {
      "epoch": 1.25,
      "learning_rate": 4.6615399246308756e-05,
      "loss": 0.6692,
      "step": 260
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.6400404616932505e-05,
      "loss": 0.6632,
      "step": 265
    },
    {
      "epoch": 1.3,
      "learning_rate": 4.617931809725599e-05,
      "loss": 0.6618,
      "step": 270
    },
    {
      "epoch": 1.33,
      "learning_rate": 4.595220262229601e-05,
      "loss": 0.7224,
      "step": 275
    },
    {
      "epoch": 1.35,
      "learning_rate": 4.571912284328626e-05,
      "loss": 0.7081,
      "step": 280
    },
    {
      "epoch": 1.38,
      "learning_rate": 4.5480145109273384e-05,
      "loss": 0.6746,
      "step": 285
    },
    {
      "epoch": 1.4,
      "learning_rate": 4.5235337448230034e-05,
      "loss": 0.6857,
      "step": 290
    },
    {
      "epoch": 1.42,
      "learning_rate": 4.498476954768981e-05,
      "loss": 0.6835,
      "step": 295
    },
    {
      "epoch": 1.45,
      "learning_rate": 4.4728512734909844e-05,
      "loss": 0.6893,
      "step": 300
    },
    {
      "epoch": 1.47,
      "learning_rate": 4.446663995656661e-05,
      "loss": 0.6608,
      "step": 305
    },
    {
      "epoch": 1.5,
      "learning_rate": 4.419922575799078e-05,
      "loss": 0.6919,
      "step": 310
    },
    {
      "epoch": 1.52,
      "learning_rate": 4.39263462619469e-05,
      "loss": 0.6883,
      "step": 315
    },
    {
      "epoch": 1.54,
      "learning_rate": 4.364807914696421e-05,
      "loss": 0.681,
      "step": 320
    },
    {
      "epoch": 1.57,
      "learning_rate": 4.3364503625224376e-05,
      "loss": 0.6898,
      "step": 325
    },
    {
      "epoch": 1.59,
      "learning_rate": 4.307570042001283e-05,
      "loss": 0.6589,
      "step": 330
    },
    {
      "epoch": 1.62,
      "learning_rate": 4.2781751742739885e-05,
      "loss": 0.6924,
      "step": 335
    },
    {
      "epoch": 1.64,
      "learning_rate": 4.248274126953819e-05,
      "loss": 0.6666,
      "step": 340
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.217875411744334e-05,
      "loss": 0.6759,
      "step": 345
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.186987682016422e-05,
      "loss": 0.6894,
      "step": 350
    },
    {
      "epoch": 1.71,
      "learning_rate": 4.155619730345015e-05,
      "loss": 0.6898,
      "step": 355
    },
    {
      "epoch": 1.74,
      "learning_rate": 4.123780486006167e-05,
      "loss": 0.6721,
      "step": 360
    },
    {
      "epoch": 1.76,
      "learning_rate": 4.09147901243523e-05,
      "loss": 0.6616,
      "step": 365
    },
    {
      "epoch": 1.79,
      "learning_rate": 4.058724504646834e-05,
      "loss": 0.6631,
      "step": 370
    },
    {
      "epoch": 1.81,
      "learning_rate": 4.025526286617411e-05,
      "loss": 0.692,
      "step": 375
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.991893808631011e-05,
      "loss": 0.6706,
      "step": 380
    },
    {
      "epoch": 1.86,
      "learning_rate": 3.957836644589159e-05,
      "loss": 0.6777,
      "step": 385
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.9233644892855285e-05,
      "loss": 0.6605,
      "step": 390
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.8884871556461966e-05,
      "loss": 0.6911,
      "step": 395
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.8532145719362734e-05,
      "loss": 0.6618,
      "step": 400
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.817556778933698e-05,
      "loss": 0.6912,
      "step": 405
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.781523927071006e-05,
      "loss": 0.6608,
      "step": 410
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.7941169738769531,
      "eval_runtime": 9.8325,
      "eval_samples_per_second": 23.595,
      "eval_steps_per_second": 5.899,
      "step": 414
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.745126273545886e-05,
      "loss": 0.6317,
      "step": 415
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.7083741794013385e-05,
      "loss": 0.5188,
      "step": 420
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.671278106576278e-05,
      "loss": 0.5433,
      "step": 425
    },
    {
      "epoch": 2.07,
      "learning_rate": 3.6338486149274107e-05,
      "loss": 0.5177,
      "step": 430
    },
    {
      "epoch": 2.1,
      "learning_rate": 3.596096359223241e-05,
      "loss": 0.5095,
      "step": 435
    },
    {
      "epoch": 2.12,
      "learning_rate": 3.5580320861110625e-05,
      "loss": 0.4915,
      "step": 440
    },
    {
      "epoch": 2.15,
      "learning_rate": 3.519666631057789e-05,
      "loss": 0.5216,
      "step": 445
    },
    {
      "epoch": 2.17,
      "learning_rate": 3.481010915265504e-05,
      "loss": 0.5118,
      "step": 450
    },
    {
      "epoch": 2.2,
      "learning_rate": 3.442075942562608e-05,
      "loss": 0.505,
      "step": 455
    },
    {
      "epoch": 2.22,
      "learning_rate": 3.402872796271438e-05,
      "loss": 0.5103,
      "step": 460
    },
    {
      "epoch": 2.24,
      "learning_rate": 3.363412636053269e-05,
      "loss": 0.532,
      "step": 465
    },
    {
      "epoch": 2.27,
      "learning_rate": 3.323706694731572e-05,
      "loss": 0.5151,
      "step": 470
    },
    {
      "epoch": 2.29,
      "learning_rate": 3.2837662750944535e-05,
      "loss": 0.5199,
      "step": 475
    },
    {
      "epoch": 2.32,
      "learning_rate": 3.243602746677179e-05,
      "loss": 0.5261,
      "step": 480
    },
    {
      "epoch": 2.34,
      "learning_rate": 3.2032275425256916e-05,
      "loss": 0.5052,
      "step": 485
    },
    {
      "epoch": 2.36,
      "learning_rate": 3.1626521559420556e-05,
      "loss": 0.508,
      "step": 490
    },
    {
      "epoch": 2.39,
      "learning_rate": 3.121888137212742e-05,
      "loss": 0.5308,
      "step": 495
    },
    {
      "epoch": 2.41,
      "learning_rate": 3.0809470903207036e-05,
      "loss": 0.5024,
      "step": 500
    },
    {
      "epoch": 2.44,
      "learning_rate": 3.0398406696421473e-05,
      "loss": 0.5069,
      "step": 505
    },
    {
      "epoch": 2.46,
      "learning_rate": 2.9985805766289817e-05,
      "loss": 0.5265,
      "step": 510
    },
    {
      "epoch": 2.48,
      "learning_rate": 2.9571785564778455e-05,
      "loss": 0.5138,
      "step": 515
    },
    {
      "epoch": 2.51,
      "learning_rate": 2.9156463947866985e-05,
      "loss": 0.515,
      "step": 520
    },
    {
      "epoch": 2.53,
      "learning_rate": 2.8739959141999023e-05,
      "loss": 0.5211,
      "step": 525
    },
    {
      "epoch": 2.56,
      "learning_rate": 2.8322389710427548e-05,
      "loss": 0.5184,
      "step": 530
    },
    {
      "epoch": 2.58,
      "learning_rate": 2.7903874519464474e-05,
      "loss": 0.5256,
      "step": 535
    },
    {
      "epoch": 2.61,
      "learning_rate": 2.748453270464381e-05,
      "loss": 0.5132,
      "step": 540
    },
    {
      "epoch": 2.63,
      "learning_rate": 2.7064483636808313e-05,
      "loss": 0.4917,
      "step": 545
    },
    {
      "epoch": 2.65,
      "learning_rate": 2.6643846888129048e-05,
      "loss": 0.5044,
      "step": 550
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.622274219806773e-05,
      "loss": 0.5055,
      "step": 555
    },
    {
      "epoch": 2.7,
      "learning_rate": 2.5801289439291388e-05,
      "loss": 0.4964,
      "step": 560
    },
    {
      "epoch": 2.73,
      "learning_rate": 2.5379608583549097e-05,
      "loss": 0.5157,
      "step": 565
    },
    {
      "epoch": 2.75,
      "learning_rate": 2.4957819667520596e-05,
      "loss": 0.5169,
      "step": 570
    },
    {
      "epoch": 2.77,
      "learning_rate": 2.4536042758646315e-05,
      "loss": 0.5232,
      "step": 575
    },
    {
      "epoch": 2.8,
      "learning_rate": 2.4114397920948657e-05,
      "loss": 0.517,
      "step": 580
    },
    {
      "epoch": 2.82,
      "learning_rate": 2.369300518085438e-05,
      "loss": 0.5095,
      "step": 585
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.327198449302752e-05,
      "loss": 0.5329,
      "step": 590
    },
    {
      "epoch": 2.87,
      "learning_rate": 2.2851455706222817e-05,
      "loss": 0.5225,
      "step": 595
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.24315385291693e-05,
      "loss": 0.5118,
      "step": 600
    },
    {
      "epoch": 2.92,
      "learning_rate": 2.201235249649369e-05,
      "loss": 0.502,
      "step": 605
    },
    {
      "epoch": 2.94,
      "learning_rate": 2.1594016934693424e-05,
      "loss": 0.5114,
      "step": 610
    },
    {
      "epoch": 2.97,
      "learning_rate": 2.117665092816885e-05,
      "loss": 0.5244,
      "step": 615
    },
    {
      "epoch": 2.99,
      "learning_rate": 2.0760373285324415e-05,
      "loss": 0.526,
      "step": 620
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.8185678720474243,
      "eval_runtime": 9.84,
      "eval_samples_per_second": 23.577,
      "eval_steps_per_second": 5.894,
      "step": 621
    },
    {
      "epoch": 3.02,
      "learning_rate": 2.0345302504748355e-05,
      "loss": 0.4593,
      "step": 625
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.993155674148058e-05,
      "loss": 0.4372,
      "step": 630
    },
    {
      "epoch": 3.06,
      "learning_rate": 1.9519253773378455e-05,
      "loss": 0.4451,
      "step": 635
    },
    {
      "epoch": 3.09,
      "learning_rate": 1.9108510967589784e-05,
      "loss": 0.4319,
      "step": 640
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.8699445247142864e-05,
      "loss": 0.428,
      "step": 645
    },
    {
      "epoch": 3.14,
      "learning_rate": 1.829217305766289e-05,
      "loss": 0.4323,
      "step": 650
    },
    {
      "epoch": 3.16,
      "learning_rate": 1.7886810334224192e-05,
      "loss": 0.445,
      "step": 655
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.7483472468347982e-05,
      "loss": 0.444,
      "step": 660
    },
    {
      "epoch": 3.21,
      "learning_rate": 1.7082274275154675e-05,
      "loss": 0.436,
      "step": 665
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.668332996068034e-05,
      "loss": 0.4187,
      "step": 670
    },
    {
      "epoch": 3.26,
      "learning_rate": 1.6286753089366532e-05,
      "loss": 0.4405,
      "step": 675
    },
    {
      "epoch": 3.28,
      "learning_rate": 1.5892656551732814e-05,
      "loss": 0.4232,
      "step": 680
    },
    {
      "epoch": 3.31,
      "learning_rate": 1.5501152532241005e-05,
      "loss": 0.4287,
      "step": 685
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.5112352477360537e-05,
      "loss": 0.4121,
      "step": 690
    },
    {
      "epoch": 3.35,
      "learning_rate": 1.472636706384382e-05,
      "loss": 0.4355,
      "step": 695
    },
    {
      "epoch": 3.38,
      "learning_rate": 1.4343306167220708e-05,
      "loss": 0.4082,
      "step": 700
    },
    {
      "epoch": 3.4,
      "learning_rate": 1.3963278830521104e-05,
      "loss": 0.4251,
      "step": 705
    },
    {
      "epoch": 3.43,
      "learning_rate": 1.3586393233234501e-05,
      "loss": 0.4102,
      "step": 710
    },
    {
      "epoch": 3.45,
      "learning_rate": 1.3212756660515338e-05,
      "loss": 0.4297,
      "step": 715
    },
    {
      "epoch": 3.47,
      "learning_rate": 1.2842475472642968e-05,
      "loss": 0.4329,
      "step": 720
    },
    {
      "epoch": 3.5,
      "learning_rate": 1.2475655074744855e-05,
      "loss": 0.4319,
      "step": 725
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.2112399886791747e-05,
      "loss": 0.4318,
      "step": 730
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.1752813313873213e-05,
      "loss": 0.4498,
      "step": 735
    },
    {
      "epoch": 3.57,
      "learning_rate": 1.139699771676207e-05,
      "loss": 0.4213,
      "step": 740
    },
    {
      "epoch": 3.59,
      "learning_rate": 1.10450543827762e-05,
      "loss": 0.4203,
      "step": 745
    },
    {
      "epoch": 3.62,
      "learning_rate": 1.0697083496945765e-05,
      "loss": 0.4355,
      "step": 750
    },
    {
      "epoch": 3.64,
      "learning_rate": 1.0353184113494386e-05,
      "loss": 0.4253,
      "step": 755
    },
    {
      "epoch": 3.67,
      "learning_rate": 1.0013454127642069e-05,
      "loss": 0.4225,
      "step": 760
    },
    {
      "epoch": 3.69,
      "learning_rate": 9.677990247738159e-06,
      "loss": 0.4248,
      "step": 765
    },
    {
      "epoch": 3.72,
      "learning_rate": 9.346887967732057e-06,
      "loss": 0.4145,
      "step": 770
    },
    {
      "epoch": 3.74,
      "learning_rate": 9.020241539989746e-06,
      "loss": 0.4261,
      "step": 775
    },
    {
      "epoch": 3.76,
      "learning_rate": 8.698143948463652e-06,
      "loss": 0.424,
      "step": 780
    },
    {
      "epoch": 3.79,
      "learning_rate": 8.380686882223618e-06,
      "loss": 0.4398,
      "step": 785
    },
    {
      "epoch": 3.81,
      "learning_rate": 8.067960709356478e-06,
      "loss": 0.4411,
      "step": 790
    },
    {
      "epoch": 3.84,
      "learning_rate": 7.760054451241635e-06,
      "loss": 0.4217,
      "step": 795
    },
    {
      "epoch": 3.86,
      "learning_rate": 7.4570557572100714e-06,
      "loss": 0.4321,
      "step": 800
    },
    {
      "epoch": 3.88,
      "learning_rate": 7.159050879593843e-06,
      "loss": 0.4352,
      "step": 805
    },
    {
      "epoch": 3.91,
      "learning_rate": 6.866124649173295e-06,
      "loss": 0.4161,
      "step": 810
    },
    {
      "epoch": 3.93,
      "learning_rate": 6.5783604510289365e-06,
      "loss": 0.4353,
      "step": 815
    },
    {
      "epoch": 3.96,
      "learning_rate": 6.295840200804823e-06,
      "loss": 0.4404,
      "step": 820
    },
    {
      "epoch": 3.98,
      "learning_rate": 6.018644321390288e-06,
      "loss": 0.4388,
      "step": 825
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.8642870783805847,
      "eval_runtime": 9.886,
      "eval_samples_per_second": 23.467,
      "eval_steps_per_second": 5.867,
      "step": 829
    },
    {
      "epoch": 4.0,
      "learning_rate": 5.7468517200265276e-06,
      "loss": 0.4298,
      "step": 830
    },
    {
      "epoch": 4.03,
      "learning_rate": 5.480539765844725e-06,
      "loss": 0.4086,
      "step": 835
    },
    {
      "epoch": 4.05,
      "learning_rate": 5.2197842678419204e-06,
      "loss": 0.4117,
      "step": 840
    },
    {
      "epoch": 4.08,
      "learning_rate": 4.9646594533010875e-06,
      "loss": 0.4236,
      "step": 845
    },
    {
      "epoch": 4.1,
      "learning_rate": 4.71523794666138e-06,
      "loss": 0.4184,
      "step": 850
    },
    {
      "epoch": 4.13,
      "learning_rate": 4.471590748844703e-06,
      "loss": 0.4028,
      "step": 855
    },
    {
      "epoch": 4.15,
      "learning_rate": 4.23378721704443e-06,
      "loss": 0.3819,
      "step": 860
    },
    {
      "epoch": 4.17,
      "learning_rate": 4.001895044981982e-06,
      "loss": 0.4075,
      "step": 865
    },
    {
      "epoch": 4.2,
      "learning_rate": 3.7759802436370174e-06,
      "loss": 0.3985,
      "step": 870
    },
    {
      "epoch": 4.22,
      "learning_rate": 3.5561071224565617e-06,
      "loss": 0.4108,
      "step": 875
    },
    {
      "epoch": 4.25,
      "learning_rate": 3.342338271048526e-06,
      "loss": 0.4295,
      "step": 880
    },
    {
      "epoch": 4.27,
      "learning_rate": 3.134734541364817e-06,
      "loss": 0.4196,
      "step": 885
    },
    {
      "epoch": 4.29,
      "learning_rate": 2.933355030379048e-06,
      "loss": 0.3981,
      "step": 890
    },
    {
      "epoch": 4.32,
      "learning_rate": 2.7382570632638854e-06,
      "loss": 0.4105,
      "step": 895
    },
    {
      "epoch": 4.34,
      "learning_rate": 2.549496177072702e-06,
      "loss": 0.4149,
      "step": 900
    },
    {
      "epoch": 4.37,
      "learning_rate": 2.3671261049302865e-06,
      "loss": 0.4132,
      "step": 905
    },
    {
      "epoch": 4.39,
      "learning_rate": 2.1911987607370123e-06,
      "loss": 0.4011,
      "step": 910
    },
    {
      "epoch": 4.41,
      "learning_rate": 2.021764224390954e-06,
      "loss": 0.4062,
      "step": 915
    },
    {
      "epoch": 4.44,
      "learning_rate": 1.8588707275319667e-06,
      "loss": 0.4021,
      "step": 920
    },
    {
      "epoch": 4.46,
      "learning_rate": 1.7025646398119988e-06,
      "loss": 0.4168,
      "step": 925
    },
    {
      "epoch": 4.49,
      "learning_rate": 1.552890455695369e-06,
      "loss": 0.4246,
      "step": 930
    },
    {
      "epoch": 4.51,
      "learning_rate": 1.409890781792872e-06,
      "loss": 0.4323,
      "step": 935
    },
    {
      "epoch": 4.54,
      "learning_rate": 1.273606324733284e-06,
      "loss": 0.4106,
      "step": 940
    },
    {
      "epoch": 4.56,
      "learning_rate": 1.144075879575715e-06,
      "loss": 0.4058,
      "step": 945
    },
    {
      "epoch": 4.58,
      "learning_rate": 1.0213363187661084e-06,
      "loss": 0.4094,
      "step": 950
    },
    {
      "epoch": 4.61,
      "learning_rate": 9.054225816410522e-07,
      "loss": 0.416,
      "step": 955
    },
    {
      "epoch": 4.63,
      "learning_rate": 7.963676644818507e-07,
      "loss": 0.4105,
      "step": 960
    },
    {
      "epoch": 4.66,
      "learning_rate": 6.942026111217359e-07,
      "loss": 0.4091,
      "step": 965
    },
    {
      "epoch": 4.68,
      "learning_rate": 5.989565041088552e-07,
      "loss": 0.4039,
      "step": 970
    },
    {
      "epoch": 4.7,
      "learning_rate": 5.106564564275607e-07,
      "loss": 0.4093,
      "step": 975
    },
    {
      "epoch": 4.73,
      "learning_rate": 4.293276037803551e-07,
      "loss": 0.4051,
      "step": 980
    },
    {
      "epoch": 4.75,
      "learning_rate": 3.5499309743271535e-07,
      "loss": 0.4143,
      "step": 985
    },
    {
      "epoch": 4.78,
      "learning_rate": 2.876740976227893e-07,
      "loss": 0.4265,
      "step": 990
    },
    {
      "epoch": 4.8,
      "learning_rate": 2.2738976753788331e-07,
      "loss": 0.4004,
      "step": 995
    },
    {
      "epoch": 4.83,
      "learning_rate": 1.7415726785939834e-07,
      "loss": 0.4109,
      "step": 1000
    },
    {
      "epoch": 4.85,
      "learning_rate": 1.2799175187786783e-07,
      "loss": 0.3954,
      "step": 1005
    },
    {
      "epoch": 4.87,
      "learning_rate": 8.89063611793417e-08,
      "loss": 0.4196,
      "step": 1010
    },
    {
      "epoch": 4.9,
      "learning_rate": 5.691222190451206e-08,
      "loss": 0.4141,
      "step": 1015
    },
    {
      "epoch": 4.92,
      "learning_rate": 3.2018441581493916e-08,
      "loss": 0.4125,
      "step": 1020
    },
    {
      "epoch": 4.95,
      "learning_rate": 1.4232106533254575e-08,
      "loss": 0.4142,
      "step": 1025
    },
    {
      "epoch": 4.97,
      "learning_rate": 3.5582798604244516e-09,
      "loss": 0.4073,
      "step": 1030
    },
    {
      "epoch": 4.99,
      "learning_rate": 0.0,
      "loss": 0.3888,
      "step": 1035
    },
    {
      "epoch": 4.99,
      "eval_loss": 0.8771134614944458,
      "eval_runtime": 9.8426,
      "eval_samples_per_second": 23.571,
      "eval_steps_per_second": 5.893,
      "step": 1035
    },
    {
      "epoch": 4.99,
      "step": 1035,
      "total_flos": 2.1046133178499072e+17,
      "train_loss": 0.5910406330357427,
      "train_runtime": 2967.3423,
      "train_samples_per_second": 5.586,
      "train_steps_per_second": 0.349
    }
  ],
  "logging_steps": 5,
  "max_steps": 1035,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 100,
  "total_flos": 2.1046133178499072e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}