{
  "best_metric": 0.16280439496040344,
  "best_model_checkpoint": "./results/checkpoint-500",
  "epoch": 4.8076923076923075,
  "eval_steps": 20,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09615384615384616,
      "grad_norm": 1.8630799055099487,
      "learning_rate": 1.9615384615384617e-05,
      "loss": 0.4117,
      "step": 20
    },
    {
      "epoch": 0.09615384615384616,
      "eval_accuracy": 0.8387096774193549,
      "eval_loss": 0.29503169655799866,
      "eval_runtime": 2.9188,
      "eval_samples_per_second": 31.863,
      "eval_steps_per_second": 8.223,
      "step": 20
    },
    {
      "epoch": 0.19230769230769232,
      "grad_norm": 9.186605453491211,
      "learning_rate": 1.923076923076923e-05,
      "loss": 0.3727,
      "step": 40
    },
    {
      "epoch": 0.19230769230769232,
      "eval_accuracy": 0.8709677419354839,
      "eval_loss": 0.20473997294902802,
      "eval_runtime": 2.8638,
      "eval_samples_per_second": 32.475,
      "eval_steps_per_second": 8.381,
      "step": 40
    },
    {
      "epoch": 0.28846153846153844,
      "grad_norm": 8.3997802734375,
      "learning_rate": 1.8846153846153846e-05,
      "loss": 0.2863,
      "step": 60
    },
    {
      "epoch": 0.28846153846153844,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.2754175662994385,
      "eval_runtime": 2.7512,
      "eval_samples_per_second": 33.803,
      "eval_steps_per_second": 8.723,
      "step": 60
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 0.0750838965177536,
      "learning_rate": 1.8461538461538465e-05,
      "loss": 0.2698,
      "step": 80
    },
    {
      "epoch": 0.38461538461538464,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.16919000446796417,
      "eval_runtime": 2.6759,
      "eval_samples_per_second": 34.754,
      "eval_steps_per_second": 8.969,
      "step": 80
    },
    {
      "epoch": 0.4807692307692308,
      "grad_norm": 0.0878710225224495,
      "learning_rate": 1.807692307692308e-05,
      "loss": 0.1232,
      "step": 100
    },
    {
      "epoch": 0.4807692307692308,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.16618885099887848,
      "eval_runtime": 2.6422,
      "eval_samples_per_second": 35.197,
      "eval_steps_per_second": 9.083,
      "step": 100
    },
    {
      "epoch": 0.5769230769230769,
      "grad_norm": 0.038383062928915024,
      "learning_rate": 1.7692307692307694e-05,
      "loss": 0.0806,
      "step": 120
    },
    {
      "epoch": 0.5769230769230769,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.15975366532802582,
      "eval_runtime": 2.6652,
      "eval_samples_per_second": 34.894,
      "eval_steps_per_second": 9.005,
      "step": 120
    },
    {
      "epoch": 0.6730769230769231,
      "grad_norm": 0.022622136399149895,
      "learning_rate": 1.730769230769231e-05,
      "loss": 0.2571,
      "step": 140
    },
    {
      "epoch": 0.6730769230769231,
      "eval_accuracy": 0.978494623655914,
      "eval_loss": 0.0726698786020279,
      "eval_runtime": 2.6685,
      "eval_samples_per_second": 34.85,
      "eval_steps_per_second": 8.994,
      "step": 140
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 9.369976043701172,
      "learning_rate": 1.6923076923076924e-05,
      "loss": 0.1374,
      "step": 160
    },
    {
      "epoch": 0.7692307692307693,
      "eval_accuracy": 0.978494623655914,
      "eval_loss": 0.0701598972082138,
      "eval_runtime": 2.6859,
      "eval_samples_per_second": 34.626,
      "eval_steps_per_second": 8.936,
      "step": 160
    },
    {
      "epoch": 0.8653846153846154,
      "grad_norm": 7.6264119148254395,
      "learning_rate": 1.653846153846154e-05,
      "loss": 0.165,
      "step": 180
    },
    {
      "epoch": 0.8653846153846154,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.08651256561279297,
      "eval_runtime": 2.7028,
      "eval_samples_per_second": 34.409,
      "eval_steps_per_second": 8.88,
      "step": 180
    },
    {
      "epoch": 0.9615384615384616,
      "grad_norm": 0.03415932506322861,
      "learning_rate": 1.6153846153846154e-05,
      "loss": 0.528,
      "step": 200
    },
    {
      "epoch": 0.9615384615384616,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.12613914906978607,
      "eval_runtime": 2.7168,
      "eval_samples_per_second": 34.231,
      "eval_steps_per_second": 8.834,
      "step": 200
    },
    {
      "epoch": 1.0576923076923077,
      "grad_norm": 0.17008128762245178,
      "learning_rate": 1.576923076923077e-05,
      "loss": 0.1023,
      "step": 220
    },
    {
      "epoch": 1.0576923076923077,
      "eval_accuracy": 0.9139784946236559,
      "eval_loss": 0.37878894805908203,
      "eval_runtime": 2.7414,
      "eval_samples_per_second": 33.924,
      "eval_steps_per_second": 8.755,
      "step": 220
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 0.010861682705581188,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.0278,
      "step": 240
    },
    {
      "epoch": 1.1538461538461537,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.1893940567970276,
      "eval_runtime": 2.7091,
      "eval_samples_per_second": 34.329,
      "eval_steps_per_second": 8.859,
      "step": 240
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.017718007788062096,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1642,
      "step": 260
    },
    {
      "epoch": 1.25,
      "eval_accuracy": 0.9139784946236559,
      "eval_loss": 0.3861010670661926,
      "eval_runtime": 2.6955,
      "eval_samples_per_second": 34.503,
      "eval_steps_per_second": 8.904,
      "step": 260
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": 0.021516259759664536,
      "learning_rate": 1.4615384615384615e-05,
      "loss": 0.1108,
      "step": 280
    },
    {
      "epoch": 1.3461538461538463,
      "eval_accuracy": 0.9354838709677419,
      "eval_loss": 0.20804111659526825,
      "eval_runtime": 2.9167,
      "eval_samples_per_second": 31.886,
      "eval_steps_per_second": 8.229,
      "step": 280
    },
    {
      "epoch": 1.4423076923076923,
      "grad_norm": 0.013441790826618671,
      "learning_rate": 1.4230769230769232e-05,
      "loss": 0.1514,
      "step": 300
    },
    {
      "epoch": 1.4423076923076923,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.15365849435329437,
      "eval_runtime": 2.8225,
      "eval_samples_per_second": 32.95,
      "eval_steps_per_second": 8.503,
      "step": 300
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 0.02140916883945465,
      "learning_rate": 1.3846153846153847e-05,
      "loss": 0.0025,
      "step": 320
    },
    {
      "epoch": 1.5384615384615383,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.30473944544792175,
      "eval_runtime": 2.6926,
      "eval_samples_per_second": 34.539,
      "eval_steps_per_second": 8.913,
      "step": 320
    },
    {
      "epoch": 1.6346153846153846,
      "grad_norm": 0.011240499094128609,
      "learning_rate": 1.3461538461538463e-05,
      "loss": 0.1124,
      "step": 340
    },
    {
      "epoch": 1.6346153846153846,
      "eval_accuracy": 0.9354838709677419,
      "eval_loss": 0.24811075627803802,
      "eval_runtime": 2.6909,
      "eval_samples_per_second": 34.561,
      "eval_steps_per_second": 8.919,
      "step": 340
    },
    {
      "epoch": 1.7307692307692308,
      "grad_norm": 0.009658828377723694,
      "learning_rate": 1.3076923076923078e-05,
      "loss": 0.3081,
      "step": 360
    },
    {
      "epoch": 1.7307692307692308,
      "eval_accuracy": 0.978494623655914,
      "eval_loss": 0.1271650344133377,
      "eval_runtime": 2.6952,
      "eval_samples_per_second": 34.505,
      "eval_steps_per_second": 8.905,
      "step": 360
    },
    {
      "epoch": 1.8269230769230769,
      "grad_norm": 48.972293853759766,
      "learning_rate": 1.2692307692307693e-05,
      "loss": 0.0764,
      "step": 380
    },
    {
      "epoch": 1.8269230769230769,
      "eval_accuracy": 0.989247311827957,
      "eval_loss": 0.0935346931219101,
      "eval_runtime": 2.7195,
      "eval_samples_per_second": 34.198,
      "eval_steps_per_second": 8.825,
      "step": 380
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 0.01087904442101717,
      "learning_rate": 1.230769230769231e-05,
      "loss": 0.0923,
      "step": 400
    },
    {
      "epoch": 1.9230769230769231,
      "eval_accuracy": 0.9354838709677419,
      "eval_loss": 0.23266802728176117,
      "eval_runtime": 2.7195,
      "eval_samples_per_second": 34.197,
      "eval_steps_per_second": 8.825,
      "step": 400
    },
    {
      "epoch": 2.019230769230769,
      "grad_norm": 19.043825149536133,
      "learning_rate": 1.1923076923076925e-05,
      "loss": 0.137,
      "step": 420
    },
    {
      "epoch": 2.019230769230769,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.16817408800125122,
      "eval_runtime": 2.7341,
      "eval_samples_per_second": 34.015,
      "eval_steps_per_second": 8.778,
      "step": 420
    },
    {
      "epoch": 2.1153846153846154,
      "grad_norm": 0.036368228495121,
      "learning_rate": 1.1538461538461538e-05,
      "loss": 0.0796,
      "step": 440
    },
    {
      "epoch": 2.1153846153846154,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.16539430618286133,
      "eval_runtime": 2.7265,
      "eval_samples_per_second": 34.11,
      "eval_steps_per_second": 8.803,
      "step": 440
    },
    {
      "epoch": 2.2115384615384617,
      "grad_norm": 0.03690864145755768,
      "learning_rate": 1.1153846153846154e-05,
      "loss": 0.0089,
      "step": 460
    },
    {
      "epoch": 2.2115384615384617,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.2199721783399582,
      "eval_runtime": 2.7071,
      "eval_samples_per_second": 34.354,
      "eval_steps_per_second": 8.865,
      "step": 460
    },
    {
      "epoch": 2.3076923076923075,
      "grad_norm": 0.011880586855113506,
      "learning_rate": 1.076923076923077e-05,
      "loss": 0.1514,
      "step": 480
    },
    {
      "epoch": 2.3076923076923075,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.20590856671333313,
      "eval_runtime": 2.697,
      "eval_samples_per_second": 34.482,
      "eval_steps_per_second": 8.899,
      "step": 480
    },
    {
      "epoch": 2.4038461538461537,
      "grad_norm": 0.008282246068120003,
      "learning_rate": 1.0384615384615386e-05,
      "loss": 0.0044,
      "step": 500
    },
    {
      "epoch": 2.4038461538461537,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.16280439496040344,
      "eval_runtime": 2.7133,
      "eval_samples_per_second": 34.275,
      "eval_steps_per_second": 8.845,
      "step": 500
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.01043081283569336,
      "learning_rate": 1e-05,
      "loss": 0.0068,
      "step": 520
    },
    {
      "epoch": 2.5,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.21267160773277283,
      "eval_runtime": 2.7065,
      "eval_samples_per_second": 34.362,
      "eval_steps_per_second": 8.868,
      "step": 520
    },
    {
      "epoch": 2.5961538461538463,
      "grad_norm": 0.005768711678683758,
      "learning_rate": 9.615384615384616e-06,
      "loss": 0.0024,
      "step": 540
    },
    {
      "epoch": 2.5961538461538463,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.1982511430978775,
      "eval_runtime": 2.7855,
      "eval_samples_per_second": 33.387,
      "eval_steps_per_second": 8.616,
      "step": 540
    },
    {
      "epoch": 2.6923076923076925,
      "grad_norm": 0.009032518602907658,
      "learning_rate": 9.230769230769232e-06,
      "loss": 0.0024,
      "step": 560
    },
    {
      "epoch": 2.6923076923076925,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.19726961851119995,
      "eval_runtime": 2.8465,
      "eval_samples_per_second": 32.672,
      "eval_steps_per_second": 8.431,
      "step": 560
    },
    {
      "epoch": 2.7884615384615383,
      "grad_norm": 0.003906765952706337,
      "learning_rate": 8.846153846153847e-06,
      "loss": 0.0011,
      "step": 580
    },
    {
      "epoch": 2.7884615384615383,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.18109194934368134,
      "eval_runtime": 2.7675,
      "eval_samples_per_second": 33.604,
      "eval_steps_per_second": 8.672,
      "step": 580
    },
    {
      "epoch": 2.8846153846153846,
      "grad_norm": 0.003964190371334553,
      "learning_rate": 8.461538461538462e-06,
      "loss": 0.0728,
      "step": 600
    },
    {
      "epoch": 2.8846153846153846,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.19110581278800964,
      "eval_runtime": 2.697,
      "eval_samples_per_second": 34.482,
      "eval_steps_per_second": 8.899,
      "step": 600
    },
    {
      "epoch": 2.980769230769231,
      "grad_norm": 0.03648888319730759,
      "learning_rate": 8.076923076923077e-06,
      "loss": 0.0004,
      "step": 620
    },
    {
      "epoch": 2.980769230769231,
      "eval_accuracy": 0.946236559139785,
      "eval_loss": 0.1861688792705536,
      "eval_runtime": 2.6824,
      "eval_samples_per_second": 34.671,
      "eval_steps_per_second": 8.947,
      "step": 620
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 0.014054065570235252,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.0004,
      "step": 640
    },
    {
      "epoch": 3.076923076923077,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.18032845854759216,
      "eval_runtime": 2.6409,
      "eval_samples_per_second": 35.216,
      "eval_steps_per_second": 9.088,
      "step": 640
    },
    {
      "epoch": 3.173076923076923,
      "grad_norm": 0.39815962314605713,
      "learning_rate": 7.307692307692308e-06,
      "loss": 0.0004,
      "step": 660
    },
    {
      "epoch": 3.173076923076923,
      "eval_accuracy": 0.956989247311828,
      "eval_loss": 0.1884646713733673,
      "eval_runtime": 2.6511,
      "eval_samples_per_second": 35.079,
      "eval_steps_per_second": 9.053,
      "step": 660
    },
    {
      "epoch": 3.269230769230769,
      "grad_norm": 0.006757414899766445,
      "learning_rate": 6.923076923076923e-06,
      "loss": 0.0003,
      "step": 680
    },
    {
      "epoch": 3.269230769230769,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.20474936068058014,
      "eval_runtime": 2.6761,
      "eval_samples_per_second": 34.752,
      "eval_steps_per_second": 8.968,
      "step": 680
    },
    {
      "epoch": 3.3653846153846154,
      "grad_norm": 0.005609381478279829,
      "learning_rate": 6.538461538461539e-06,
      "loss": 0.0002,
      "step": 700
    },
    {
      "epoch": 3.3653846153846154,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.2052512764930725,
      "eval_runtime": 2.7156,
      "eval_samples_per_second": 34.247,
      "eval_steps_per_second": 8.838,
      "step": 700
    },
    {
      "epoch": 3.4615384615384617,
      "grad_norm": 0.005083520896732807,
      "learning_rate": 6.153846153846155e-06,
      "loss": 0.1008,
      "step": 720
    },
    {
      "epoch": 3.4615384615384617,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.2019633650779724,
      "eval_runtime": 2.751,
      "eval_samples_per_second": 33.806,
      "eval_steps_per_second": 8.724,
      "step": 720
    },
    {
      "epoch": 3.5576923076923075,
      "grad_norm": 0.0040250192396342754,
      "learning_rate": 5.769230769230769e-06,
      "loss": 0.0002,
      "step": 740
    },
    {
      "epoch": 3.5576923076923075,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.19885139167308807,
      "eval_runtime": 2.7384,
      "eval_samples_per_second": 33.962,
      "eval_steps_per_second": 8.764,
      "step": 740
    },
    {
      "epoch": 3.6538461538461537,
      "grad_norm": 0.009717087261378765,
      "learning_rate": 5.384615384615385e-06,
      "loss": 0.0925,
      "step": 760
    },
    {
      "epoch": 3.6538461538461537,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.17406632006168365,
      "eval_runtime": 2.7038,
      "eval_samples_per_second": 34.396,
      "eval_steps_per_second": 8.876,
      "step": 760
    },
    {
      "epoch": 3.75,
      "grad_norm": 0.005793453194200993,
      "learning_rate": 5e-06,
      "loss": 0.0003,
      "step": 780
    },
    {
      "epoch": 3.75,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.17425128817558289,
      "eval_runtime": 2.6992,
      "eval_samples_per_second": 34.454,
      "eval_steps_per_second": 8.891,
      "step": 780
    },
    {
      "epoch": 3.8461538461538463,
      "grad_norm": 0.011910405941307545,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.0073,
      "step": 800
    },
    {
      "epoch": 3.8461538461538463,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.16527394950389862,
      "eval_runtime": 2.6718,
      "eval_samples_per_second": 34.808,
      "eval_steps_per_second": 8.983,
      "step": 800
    },
    {
      "epoch": 3.9423076923076925,
      "grad_norm": 0.010804111137986183,
      "learning_rate": 4.230769230769231e-06,
      "loss": 0.0003,
      "step": 820
    },
    {
      "epoch": 3.9423076923076925,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.16675765812397003,
      "eval_runtime": 2.7088,
      "eval_samples_per_second": 34.333,
      "eval_steps_per_second": 8.86,
      "step": 820
    },
    {
      "epoch": 4.038461538461538,
      "grad_norm": 0.0066652605310082436,
      "learning_rate": 3.846153846153847e-06,
      "loss": 0.0003,
      "step": 840
    },
    {
      "epoch": 4.038461538461538,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.16954387724399567,
      "eval_runtime": 2.7006,
      "eval_samples_per_second": 34.437,
      "eval_steps_per_second": 8.887,
      "step": 840
    },
    {
      "epoch": 4.134615384615385,
      "grad_norm": 0.004465825390070677,
      "learning_rate": 3.4615384615384617e-06,
      "loss": 0.0022,
      "step": 860
    },
    {
      "epoch": 4.134615384615385,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.18703444302082062,
      "eval_runtime": 2.6987,
      "eval_samples_per_second": 34.462,
      "eval_steps_per_second": 8.893,
      "step": 860
    },
    {
      "epoch": 4.230769230769231,
      "grad_norm": 0.03985193744301796,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.0002,
      "step": 880
    },
    {
      "epoch": 4.230769230769231,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.1847122311592102,
      "eval_runtime": 2.6856,
      "eval_samples_per_second": 34.629,
      "eval_steps_per_second": 8.936,
      "step": 880
    },
    {
      "epoch": 4.326923076923077,
      "grad_norm": 0.0049697416834533215,
      "learning_rate": 2.6923076923076923e-06,
      "loss": 0.0656,
      "step": 900
    },
    {
      "epoch": 4.326923076923077,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.18260079622268677,
      "eval_runtime": 2.6891,
      "eval_samples_per_second": 34.585,
      "eval_steps_per_second": 8.925,
      "step": 900
    },
    {
      "epoch": 4.423076923076923,
      "grad_norm": 0.00540167884901166,
      "learning_rate": 2.307692307692308e-06,
      "loss": 0.0739,
      "step": 920
    },
    {
      "epoch": 4.423076923076923,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.17730936408042908,
      "eval_runtime": 2.7193,
      "eval_samples_per_second": 34.2,
      "eval_steps_per_second": 8.826,
      "step": 920
    },
    {
      "epoch": 4.519230769230769,
      "grad_norm": 0.00519781606271863,
      "learning_rate": 1.9230769230769234e-06,
      "loss": 0.0005,
      "step": 940
    },
    {
      "epoch": 4.519230769230769,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.17037908732891083,
      "eval_runtime": 2.7306,
      "eval_samples_per_second": 34.058,
      "eval_steps_per_second": 8.789,
      "step": 940
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 0.006678344216197729,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.0007,
      "step": 960
    },
    {
      "epoch": 4.615384615384615,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.1829458475112915,
      "eval_runtime": 2.7162,
      "eval_samples_per_second": 34.239,
      "eval_steps_per_second": 8.836,
      "step": 960
    },
    {
      "epoch": 4.711538461538462,
      "grad_norm": 0.00803500134497881,
      "learning_rate": 1.153846153846154e-06,
      "loss": 0.0003,
      "step": 980
    },
    {
      "epoch": 4.711538461538462,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.1840982586145401,
      "eval_runtime": 2.6991,
      "eval_samples_per_second": 34.456,
      "eval_steps_per_second": 8.892,
      "step": 980
    },
    {
      "epoch": 4.8076923076923075,
      "grad_norm": 0.006934212986379862,
      "learning_rate": 7.692307692307694e-07,
      "loss": 0.0003,
      "step": 1000
    },
    {
      "epoch": 4.8076923076923075,
      "eval_accuracy": 0.967741935483871,
      "eval_loss": 0.18442070484161377,
      "eval_runtime": 2.6905,
      "eval_samples_per_second": 34.566,
      "eval_steps_per_second": 8.92,
      "step": 1000
    }
  ],
  "logging_steps": 20,
  "max_steps": 1040,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 963160670914020.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}