|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 268,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0037313432835820895,
      "grad_norm": 267.42432987632304,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 2.0376,
      "step": 1
    },
    {
      "epoch": 0.018656716417910446,
      "grad_norm": 497.8829601573007,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 5.6478,
      "step": 5
    },
    {
      "epoch": 0.03731343283582089,
      "grad_norm": 454.2837874993471,
      "learning_rate": 7.407407407407407e-05,
      "loss": 9.0654,
      "step": 10
    },
    {
      "epoch": 0.055970149253731345,
      "grad_norm": 172.70761646653747,
      "learning_rate": 0.00011111111111111112,
      "loss": 11.6263,
      "step": 15
    },
    {
      "epoch": 0.07462686567164178,
      "grad_norm": 431.0320152584827,
      "learning_rate": 0.00014814814814814815,
      "loss": 12.0175,
      "step": 20
    },
    {
      "epoch": 0.09328358208955224,
      "grad_norm": 57.24075920715652,
      "learning_rate": 0.0001851851851851852,
      "loss": 10.5566,
      "step": 25
    },
    {
      "epoch": 0.11194029850746269,
      "grad_norm": 207.16475278732815,
      "learning_rate": 0.00019992354201925428,
      "loss": 22.4578,
      "step": 30
    },
    {
      "epoch": 0.13059701492537312,
      "grad_norm": 39.84010000225324,
      "learning_rate": 0.0001994567221375987,
      "loss": 13.0098,
      "step": 35
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 421.8360841321533,
      "learning_rate": 0.00019856753906964686,
      "loss": 17.1304,
      "step": 40
    },
    {
      "epoch": 0.16791044776119404,
      "grad_norm": 32.18021173317482,
      "learning_rate": 0.00019725976891203376,
      "loss": 9.8365,
      "step": 45
    },
    {
      "epoch": 0.1865671641791045,
      "grad_norm": 17.663716447972774,
      "learning_rate": 0.00019553896537655318,
      "loss": 7.5461,
      "step": 50
    },
    {
      "epoch": 0.20522388059701493,
      "grad_norm": 15.828267774314414,
      "learning_rate": 0.0001934124362051919,
      "loss": 6.9658,
      "step": 55
    },
    {
      "epoch": 0.22388059701492538,
      "grad_norm": 307.89143980814515,
      "learning_rate": 0.000190889212136318,
      "loss": 7.6309,
      "step": 60
    },
    {
      "epoch": 0.24253731343283583,
      "grad_norm": 17.3100107773664,
      "learning_rate": 0.0001879800085538147,
      "loss": 6.9331,
      "step": 65
    },
    {
      "epoch": 0.26119402985074625,
      "grad_norm": 42.588666065454795,
      "learning_rate": 0.00018469717998202462,
      "loss": 6.9227,
      "step": 70
    },
    {
      "epoch": 0.2798507462686567,
      "grad_norm": 10.804006051624885,
      "learning_rate": 0.00018105466761975109,
      "loss": 6.5832,
      "step": 75
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 43.74337828897061,
      "learning_rate": 0.00017706794013612364,
      "loss": 6.8176,
      "step": 80
    },
    {
      "epoch": 0.31716417910447764,
      "grad_norm": 64.2085843780536,
      "learning_rate": 0.00017275392797975032,
      "loss": 6.7523,
      "step": 85
    },
    {
      "epoch": 0.3358208955223881,
      "grad_norm": 16.593450775671112,
      "learning_rate": 0.0001681309514801265,
      "loss": 6.5873,
      "step": 90
    },
    {
      "epoch": 0.35447761194029853,
      "grad_norm": 44.277670283010906,
      "learning_rate": 0.00016321864304663173,
      "loss": 6.5937,
      "step": 95
    },
    {
      "epoch": 0.373134328358209,
      "grad_norm": 10.997588847093564,
      "learning_rate": 0.0001580378637955128,
      "loss": 6.4452,
      "step": 100
    },
    {
      "epoch": 0.3917910447761194,
      "grad_norm": 12.654122748772814,
      "learning_rate": 0.00015261061495891345,
      "loss": 6.3491,
      "step": 105
    },
    {
      "epoch": 0.41044776119402987,
      "grad_norm": 26.609938261473722,
      "learning_rate": 0.00014695994445216985,
      "loss": 6.4977,
      "step": 110
    },
    {
      "epoch": 0.4291044776119403,
      "grad_norm": 29.405974371105227,
      "learning_rate": 0.00014110984899615367,
      "loss": 6.3102,
      "step": 115
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 14.382835497900352,
      "learning_rate": 0.000135085172210319,
      "loss": 6.2751,
      "step": 120
    },
    {
      "epoch": 0.4664179104477612,
      "grad_norm": 27.156229733294634,
      "learning_rate": 0.00012891149910922267,
      "loss": 6.148,
      "step": 125
    },
    {
      "epoch": 0.48507462686567165,
      "grad_norm": 14.830451492836382,
      "learning_rate": 0.00012261504745055964,
      "loss": 5.9625,
      "step": 130
    },
    {
      "epoch": 0.503731343283582,
      "grad_norm": 16.181539488534295,
      "learning_rate": 0.00011622255639612554,
      "loss": 5.8024,
      "step": 135
    },
    {
      "epoch": 0.5223880597014925,
      "grad_norm": 7.100118045807948,
      "learning_rate": 0.00010976117295853154,
      "loss": 5.7823,
      "step": 140
    },
    {
      "epoch": 0.5410447761194029,
      "grad_norm": 6.078519973242612,
      "learning_rate": 0.00010325833671589687,
      "loss": 5.6857,
      "step": 145
    },
    {
      "epoch": 0.5597014925373134,
      "grad_norm": 10.507318938395432,
      "learning_rate": 9.674166328410318e-05,
      "loss": 5.5985,
      "step": 150
    },
    {
      "epoch": 0.5783582089552238,
      "grad_norm": 7.511601189522553,
      "learning_rate": 9.023882704146848e-05,
      "loss": 5.5306,
      "step": 155
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 9.098000461581764,
      "learning_rate": 8.377744360387447e-05,
      "loss": 5.4113,
      "step": 160
    },
    {
      "epoch": 0.6156716417910447,
      "grad_norm": 11.04848131475541,
      "learning_rate": 7.738495254944042e-05,
      "loss": 5.3399,
      "step": 165
    },
    {
      "epoch": 0.6343283582089553,
      "grad_norm": 4.899181488882252,
      "learning_rate": 7.108850089077735e-05,
      "loss": 5.2061,
      "step": 170
    },
    {
      "epoch": 0.6529850746268657,
      "grad_norm": 8.571361713934442,
      "learning_rate": 6.491482778968104e-05,
      "loss": 5.1067,
      "step": 175
    },
    {
      "epoch": 0.6716417910447762,
      "grad_norm": 6.366810352482946,
      "learning_rate": 5.889015100384636e-05,
      "loss": 4.9611,
      "step": 180
    },
    {
      "epoch": 0.6902985074626866,
      "grad_norm": 7.987975314659695,
      "learning_rate": 5.304005554783015e-05,
      "loss": 4.9129,
      "step": 185
    },
    {
      "epoch": 0.7089552238805971,
      "grad_norm": 4.106981286779299,
      "learning_rate": 4.738938504108659e-05,
      "loss": 4.8382,
      "step": 190
    },
    {
      "epoch": 0.7276119402985075,
      "grad_norm": 3.7918439805690527,
      "learning_rate": 4.196213620448723e-05,
      "loss": 4.7711,
      "step": 195
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 3.6468350915483128,
      "learning_rate": 3.6781356953368284e-05,
      "loss": 4.6595,
      "step": 200
    },
    {
      "epoch": 0.7649253731343284,
      "grad_norm": 2.974947620754676,
      "learning_rate": 3.186904851987351e-05,
      "loss": 4.638,
      "step": 205
    },
    {
      "epoch": 0.7835820895522388,
      "grad_norm": 2.355137193482797,
      "learning_rate": 2.724607202024969e-05,
      "loss": 4.5794,
      "step": 210
    },
    {
      "epoch": 0.8022388059701493,
      "grad_norm": 3.096708977884932,
      "learning_rate": 2.2932059863876365e-05,
      "loss": 4.5585,
      "step": 215
    },
    {
      "epoch": 0.8208955223880597,
      "grad_norm": 2.8473392855240984,
      "learning_rate": 1.8945332380248913e-05,
      "loss": 4.4602,
      "step": 220
    },
    {
      "epoch": 0.8395522388059702,
      "grad_norm": 2.7970718450568692,
      "learning_rate": 1.5302820017975394e-05,
      "loss": 4.419,
      "step": 225
    },
    {
      "epoch": 0.8582089552238806,
      "grad_norm": 3.5908637322056407,
      "learning_rate": 1.2019991446185309e-05,
      "loss": 4.394,
      "step": 230
    },
    {
      "epoch": 0.8768656716417911,
      "grad_norm": 2.327431302717702,
      "learning_rate": 9.110787863682002e-06,
      "loss": 4.3572,
      "step": 235
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 3.3167882122894534,
      "learning_rate": 6.587563794808127e-06,
      "loss": 4.3148,
      "step": 240
    },
    {
      "epoch": 0.914179104477612,
      "grad_norm": 3.128747890951307,
      "learning_rate": 4.461034623446847e-06,
      "loss": 4.3543,
      "step": 245
    },
    {
      "epoch": 0.9328358208955224,
      "grad_norm": 2.4600560952322423,
      "learning_rate": 2.7402310879662497e-06,
      "loss": 4.301,
      "step": 250
    },
    {
      "epoch": 0.9514925373134329,
      "grad_norm": 2.0585484791225954,
      "learning_rate": 1.43246093035313e-06,
      "loss": 4.2912,
      "step": 255
    },
    {
      "epoch": 0.9701492537313433,
      "grad_norm": 1.864333596517171,
      "learning_rate": 5.432778624013257e-07,
      "loss": 4.281,
      "step": 260
    },
    {
      "epoch": 0.9888059701492538,
      "grad_norm": 1.6036658756559803,
      "learning_rate": 7.645798074572552e-08,
      "loss": 4.2827,
      "step": 265
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.337969779968262,
      "eval_runtime": 2.3919,
      "eval_samples_per_second": 0.836,
      "eval_steps_per_second": 0.418,
      "step": 268
    },
    {
      "epoch": 1.0,
      "step": 268,
      "total_flos": 14002264473600.0,
      "train_loss": 6.666975224196022,
      "train_runtime": 4378.2382,
      "train_samples_per_second": 1.956,
      "train_steps_per_second": 0.061
    }
  ],
  "logging_steps": 5,
  "max_steps": 268,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 14002264473600.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|