{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 76,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013157894736842105,
      "grad_norm": 1700.623831965343,
      "learning_rate": 1.25e-06,
      "loss": 11.6905,
      "step": 1
    },
    {
      "epoch": 0.02631578947368421,
      "grad_norm": 1649.0192864629867,
      "learning_rate": 2.5e-06,
      "loss": 11.7067,
      "step": 2
    },
    {
      "epoch": 0.039473684210526314,
      "grad_norm": 1101.5446707599804,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 10.8183,
      "step": 3
    },
    {
      "epoch": 0.05263157894736842,
      "grad_norm": 1102.519048385476,
      "learning_rate": 5e-06,
      "loss": 9.0508,
      "step": 4
    },
    {
      "epoch": 0.06578947368421052,
      "grad_norm": 802.3239878863764,
      "learning_rate": 6.25e-06,
      "loss": 5.9038,
      "step": 5
    },
    {
      "epoch": 0.07894736842105263,
      "grad_norm": 258.11430612786404,
      "learning_rate": 7.500000000000001e-06,
      "loss": 4.7947,
      "step": 6
    },
    {
      "epoch": 0.09210526315789473,
      "grad_norm": 403.267159818518,
      "learning_rate": 8.750000000000001e-06,
      "loss": 5.461,
      "step": 7
    },
    {
      "epoch": 0.10526315789473684,
      "grad_norm": 98.17603349248624,
      "learning_rate": 1e-05,
      "loss": 3.1841,
      "step": 8
    },
    {
      "epoch": 0.11842105263157894,
      "grad_norm": 125.08383294967409,
      "learning_rate": 9.994664874011864e-06,
      "loss": 5.2682,
      "step": 9
    },
    {
      "epoch": 0.13157894736842105,
      "grad_norm": 178.4978710757842,
      "learning_rate": 9.978670881475173e-06,
      "loss": 4.6418,
      "step": 10
    },
    {
      "epoch": 0.14473684210526316,
      "grad_norm": 33.98589110631106,
      "learning_rate": 9.952052154376027e-06,
      "loss": 3.3566,
      "step": 11
    },
    {
      "epoch": 0.15789473684210525,
      "grad_norm": 40.35276750130872,
      "learning_rate": 9.91486549841951e-06,
      "loss": 3.6266,
      "step": 12
    },
    {
      "epoch": 0.17105263157894737,
      "grad_norm": 26.024781858769245,
      "learning_rate": 9.867190271803466e-06,
      "loss": 2.921,
      "step": 13
    },
    {
      "epoch": 0.18421052631578946,
      "grad_norm": 39.19155525491511,
      "learning_rate": 9.809128215864096e-06,
      "loss": 3.3999,
      "step": 14
    },
    {
      "epoch": 0.19736842105263158,
      "grad_norm": 31.392187014966936,
      "learning_rate": 9.74080323795483e-06,
      "loss": 2.9989,
      "step": 15
    },
    {
      "epoch": 0.21052631578947367,
      "grad_norm": 11.487293577006643,
      "learning_rate": 9.66236114702178e-06,
      "loss": 2.4908,
      "step": 16
    },
    {
      "epoch": 0.2236842105263158,
      "grad_norm": 23.906559218300867,
      "learning_rate": 9.573969342440107e-06,
      "loss": 2.3537,
      "step": 17
    },
    {
      "epoch": 0.23684210526315788,
      "grad_norm": 18.145322592938253,
      "learning_rate": 9.475816456775313e-06,
      "loss": 2.3298,
      "step": 18
    },
    {
      "epoch": 0.25,
      "grad_norm": 19.690523946140633,
      "learning_rate": 9.368111953231849e-06,
      "loss": 2.1922,
      "step": 19
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 9.140283229170583,
      "learning_rate": 9.251085678648072e-06,
      "loss": 2.0839,
      "step": 20
    },
    {
      "epoch": 0.27631578947368424,
      "grad_norm": 17.542896607710595,
      "learning_rate": 9.124987372991512e-06,
      "loss": 2.1548,
      "step": 21
    },
    {
      "epoch": 0.2894736842105263,
      "grad_norm": 14.27679773227752,
      "learning_rate": 8.990086136401199e-06,
      "loss": 2.0888,
      "step": 22
    },
    {
      "epoch": 0.3026315789473684,
      "grad_norm": 16.817965246454907,
      "learning_rate": 8.846669854914395e-06,
      "loss": 1.8762,
      "step": 23
    },
    {
      "epoch": 0.3157894736842105,
      "grad_norm": 10.564762547828062,
      "learning_rate": 8.695044586103297e-06,
      "loss": 1.8733,
      "step": 24
    },
    {
      "epoch": 0.32894736842105265,
      "grad_norm": 14.138309552512082,
      "learning_rate": 8.535533905932739e-06,
      "loss": 1.7913,
      "step": 25
    },
    {
      "epoch": 0.34210526315789475,
      "grad_norm": 20.034746365001553,
      "learning_rate": 8.368478218232787e-06,
      "loss": 1.737,
      "step": 26
    },
    {
      "epoch": 0.35526315789473684,
      "grad_norm": 16.855851572810284,
      "learning_rate": 8.194234028259806e-06,
      "loss": 1.6829,
      "step": 27
    },
    {
      "epoch": 0.3684210526315789,
      "grad_norm": 16.950076744646207,
      "learning_rate": 8.013173181896283e-06,
      "loss": 1.4163,
      "step": 28
    },
    {
      "epoch": 0.3815789473684211,
      "grad_norm": 22.124680222566646,
      "learning_rate": 7.82568207211296e-06,
      "loss": 1.5623,
      "step": 29
    },
    {
      "epoch": 0.39473684210526316,
      "grad_norm": 28.487415904850273,
      "learning_rate": 7.63216081438678e-06,
      "loss": 1.4885,
      "step": 30
    },
    {
      "epoch": 0.40789473684210525,
      "grad_norm": 43.5618784839132,
      "learning_rate": 7.4330223928342814e-06,
      "loss": 1.5518,
      "step": 31
    },
    {
      "epoch": 0.42105263157894735,
      "grad_norm": 24.171098856189804,
      "learning_rate": 7.2286917788826926e-06,
      "loss": 1.4394,
      "step": 32
    },
    {
      "epoch": 0.4342105263157895,
      "grad_norm": 23.119722073349426,
      "learning_rate": 7.019605024359475e-06,
      "loss": 1.2995,
      "step": 33
    },
    {
      "epoch": 0.4473684210526316,
      "grad_norm": 16.689485734152086,
      "learning_rate": 6.806208330935766e-06,
      "loss": 1.1763,
      "step": 34
    },
    {
      "epoch": 0.4605263157894737,
      "grad_norm": 15.859289728493408,
      "learning_rate": 6.588957097909509e-06,
      "loss": 1.144,
      "step": 35
    },
    {
      "epoch": 0.47368421052631576,
      "grad_norm": 15.73886933600492,
      "learning_rate": 6.368314950360416e-06,
      "loss": 1.1619,
      "step": 36
    },
    {
      "epoch": 0.4868421052631579,
      "grad_norm": 15.1571280026014,
      "learning_rate": 6.144752749750671e-06,
      "loss": 1.0688,
      "step": 37
    },
    {
      "epoch": 0.5,
      "grad_norm": 13.225349267137519,
      "learning_rate": 5.918747589082853e-06,
      "loss": 1.1828,
      "step": 38
    },
    {
      "epoch": 0.5131578947368421,
      "grad_norm": 14.492320032343926,
      "learning_rate": 5.690781774759412e-06,
      "loss": 1.0862,
      "step": 39
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 9.45491415828414,
      "learning_rate": 5.46134179731651e-06,
      "loss": 1.0357,
      "step": 40
    },
    {
      "epoch": 0.5394736842105263,
      "grad_norm": 11.676684639895935,
      "learning_rate": 5.230917293228699e-06,
      "loss": 0.952,
      "step": 41
    },
    {
      "epoch": 0.5526315789473685,
      "grad_norm": 17.496877680196757,
      "learning_rate": 5e-06,
      "loss": 0.9923,
      "step": 42
    },
    {
      "epoch": 0.5657894736842105,
      "grad_norm": 21.183543111805434,
      "learning_rate": 4.7690827067713035e-06,
      "loss": 1.1084,
      "step": 43
    },
    {
      "epoch": 0.5789473684210527,
      "grad_norm": 16.65044124544019,
      "learning_rate": 4.53865820268349e-06,
      "loss": 1.0066,
      "step": 44
    },
    {
      "epoch": 0.5921052631578947,
      "grad_norm": 14.428813163253018,
      "learning_rate": 4.309218225240591e-06,
      "loss": 0.8975,
      "step": 45
    },
    {
      "epoch": 0.6052631578947368,
      "grad_norm": 13.962879418934243,
      "learning_rate": 4.081252410917148e-06,
      "loss": 0.9082,
      "step": 46
    },
    {
      "epoch": 0.618421052631579,
      "grad_norm": 14.852973745352799,
      "learning_rate": 3.855247250249331e-06,
      "loss": 0.7634,
      "step": 47
    },
    {
      "epoch": 0.631578947368421,
      "grad_norm": 16.12742922776505,
      "learning_rate": 3.6316850496395863e-06,
      "loss": 0.9353,
      "step": 48
    },
    {
      "epoch": 0.6447368421052632,
      "grad_norm": 10.839276159848792,
      "learning_rate": 3.4110429020904924e-06,
      "loss": 0.8348,
      "step": 49
    },
    {
      "epoch": 0.6578947368421053,
      "grad_norm": 28.986407002948578,
      "learning_rate": 3.1937916690642356e-06,
      "loss": 0.9786,
      "step": 50
    },
    {
      "epoch": 0.6710526315789473,
      "grad_norm": 9.721255219536264,
      "learning_rate": 2.980394975640526e-06,
      "loss": 0.7851,
      "step": 51
    },
    {
      "epoch": 0.6842105263157895,
      "grad_norm": 14.650668800173477,
      "learning_rate": 2.771308221117309e-06,
      "loss": 0.9191,
      "step": 52
    },
    {
      "epoch": 0.6973684210526315,
      "grad_norm": 12.383256838083554,
      "learning_rate": 2.5669776071657194e-06,
      "loss": 0.911,
      "step": 53
    },
    {
      "epoch": 0.7105263157894737,
      "grad_norm": 9.794724839588078,
      "learning_rate": 2.3678391856132203e-06,
      "loss": 0.8739,
      "step": 54
    },
    {
      "epoch": 0.7236842105263158,
      "grad_norm": 16.630173515158628,
      "learning_rate": 2.174317927887041e-06,
      "loss": 0.8075,
      "step": 55
    },
    {
      "epoch": 0.7368421052631579,
      "grad_norm": 13.198903695310259,
      "learning_rate": 1.9868268181037186e-06,
      "loss": 0.9054,
      "step": 56
    },
    {
      "epoch": 0.75,
      "grad_norm": 12.519325479242989,
      "learning_rate": 1.8057659717401948e-06,
      "loss": 0.7372,
      "step": 57
    },
    {
      "epoch": 0.7631578947368421,
      "grad_norm": 10.337188449964808,
      "learning_rate": 1.6315217817672142e-06,
      "loss": 0.7301,
      "step": 58
    },
    {
      "epoch": 0.7763157894736842,
      "grad_norm": 8.532097408416009,
      "learning_rate": 1.4644660940672628e-06,
      "loss": 0.7052,
      "step": 59
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 9.94979317440902,
      "learning_rate": 1.3049554138967052e-06,
      "loss": 0.701,
      "step": 60
    },
    {
      "epoch": 0.8026315789473685,
      "grad_norm": 14.270660743327786,
      "learning_rate": 1.1533301450856054e-06,
      "loss": 0.7537,
      "step": 61
    },
    {
      "epoch": 0.8157894736842105,
      "grad_norm": 8.630752726293801,
      "learning_rate": 1.0099138635988026e-06,
      "loss": 0.7162,
      "step": 62
    },
    {
      "epoch": 0.8289473684210527,
      "grad_norm": 10.632504441908269,
      "learning_rate": 8.750126270084891e-07,
      "loss": 0.7647,
      "step": 63
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 8.980630672317927,
      "learning_rate": 7.489143213519301e-07,
      "loss": 0.6812,
      "step": 64
    },
    {
      "epoch": 0.8552631578947368,
      "grad_norm": 5.941122650864663,
      "learning_rate": 6.318880467681527e-07,
      "loss": 0.6393,
      "step": 65
    },
    {
      "epoch": 0.868421052631579,
      "grad_norm": 13.118946568151987,
      "learning_rate": 5.241835432246888e-07,
      "loss": 0.6937,
      "step": 66
    },
    {
      "epoch": 0.881578947368421,
      "grad_norm": 11.16288012285256,
      "learning_rate": 4.2603065755989493e-07,
      "loss": 0.7598,
      "step": 67
    },
    {
      "epoch": 0.8947368421052632,
      "grad_norm": 8.752876273254847,
      "learning_rate": 3.3763885297822153e-07,
      "loss": 0.6724,
      "step": 68
    },
    {
      "epoch": 0.9078947368421053,
      "grad_norm": 10.41179108138972,
      "learning_rate": 2.5919676204517073e-07,
      "loss": 0.7259,
      "step": 69
    },
    {
      "epoch": 0.9210526315789473,
      "grad_norm": 8.452683523386511,
      "learning_rate": 1.908717841359048e-07,
      "loss": 0.7196,
      "step": 70
    },
    {
      "epoch": 0.9342105263157895,
      "grad_norm": 9.08322472471614,
      "learning_rate": 1.328097281965357e-07,
      "loss": 0.6541,
      "step": 71
    },
    {
      "epoch": 0.9473684210526315,
      "grad_norm": 11.964823295684173,
      "learning_rate": 8.513450158049109e-08,
      "loss": 0.6954,
      "step": 72
    },
    {
      "epoch": 0.9605263157894737,
      "grad_norm": 6.649921077114204,
      "learning_rate": 4.794784562397459e-08,
      "loss": 0.633,
      "step": 73
    },
    {
      "epoch": 0.9736842105263158,
      "grad_norm": 9.29104017000911,
      "learning_rate": 2.1329118524827662e-08,
      "loss": 0.701,
      "step": 74
    },
    {
      "epoch": 0.9868421052631579,
      "grad_norm": 7.6775727377006655,
      "learning_rate": 5.3351259881379016e-09,
      "loss": 0.6241,
      "step": 75
    },
    {
      "epoch": 1.0,
      "grad_norm": 8.200287140678359,
      "learning_rate": 0.0,
      "loss": 0.8084,
      "step": 76
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7041537165641785,
      "eval_runtime": 129.4756,
      "eval_samples_per_second": 39.428,
      "eval_steps_per_second": 1.236,
      "step": 76
    },
    {
      "epoch": 1.0,
      "step": 76,
      "total_flos": 2.244031216091136e+16,
      "train_loss": 2.0892962131061052,
      "train_runtime": 1016.0826,
      "train_samples_per_second": 9.544,
      "train_steps_per_second": 0.075
    }
  ],
  "logging_steps": 1,
  "max_steps": 76,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.244031216091136e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}