yh_codellama/checkpoint-13500/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.657402155416903,
"eval_steps": 500,
"global_step": 13500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.28360748723766305,
"grad_norm": 4.999021530151367,
"learning_rate": 1.971639251276234e-05,
"loss": 6.7525,
"step": 500
},
{
"epoch": 0.5672149744753261,
"grad_norm": 8.877035140991211,
"learning_rate": 1.9432785025524678e-05,
"loss": 4.8633,
"step": 1000
},
{
"epoch": 0.8508224617129893,
"grad_norm": 5.504563808441162,
"learning_rate": 1.9149177538287012e-05,
"loss": 4.7848,
"step": 1500
},
{
"epoch": 1.0,
"eval_runtime": 39.1159,
"eval_samples_per_second": 5.011,
"eval_steps_per_second": 5.011,
"step": 1763
},
{
"epoch": 1.1344299489506522,
"grad_norm": 4.3093953132629395,
"learning_rate": 1.886557005104935e-05,
"loss": 4.7304,
"step": 2000
},
{
"epoch": 1.4180374361883152,
"grad_norm": 8.88564395904541,
"learning_rate": 1.8581962563811688e-05,
"loss": 4.7621,
"step": 2500
},
{
"epoch": 1.7016449234259783,
"grad_norm": 4.924105644226074,
"learning_rate": 1.8298355076574022e-05,
"loss": 4.7163,
"step": 3000
},
{
"epoch": 1.9852524106636416,
"grad_norm": 5.222480773925781,
"learning_rate": 1.801474758933636e-05,
"loss": 4.6234,
"step": 3500
},
{
"epoch": 2.0,
"eval_runtime": 40.3571,
"eval_samples_per_second": 4.857,
"eval_steps_per_second": 4.857,
"step": 3526
},
{
"epoch": 2.2688598979013044,
"grad_norm": 4.8733978271484375,
"learning_rate": 1.7731140102098695e-05,
"loss": 4.7049,
"step": 4000
},
{
"epoch": 2.552467385138968,
"grad_norm": 5.571890830993652,
"learning_rate": 1.7447532614861033e-05,
"loss": 4.5987,
"step": 4500
},
{
"epoch": 2.8360748723766305,
"grad_norm": 5.371450424194336,
"learning_rate": 1.716392512762337e-05,
"loss": 4.6503,
"step": 5000
},
{
"epoch": 3.0,
"eval_runtime": 36.7471,
"eval_samples_per_second": 5.334,
"eval_steps_per_second": 5.334,
"step": 5289
},
{
"epoch": 3.119682359614294,
"grad_norm": 6.941244602203369,
"learning_rate": 1.688031764038571e-05,
"loss": 4.7147,
"step": 5500
},
{
"epoch": 3.403289846851957,
"grad_norm": 7.103884220123291,
"learning_rate": 1.6596710153148043e-05,
"loss": 4.6078,
"step": 6000
},
{
"epoch": 3.68689733408962,
"grad_norm": 6.286114692687988,
"learning_rate": 1.631310266591038e-05,
"loss": 4.6098,
"step": 6500
},
{
"epoch": 3.970504821327283,
"grad_norm": 11.397751808166504,
"learning_rate": 1.602949517867272e-05,
"loss": 4.583,
"step": 7000
},
{
"epoch": 4.0,
"eval_runtime": 39.4552,
"eval_samples_per_second": 4.968,
"eval_steps_per_second": 4.968,
"step": 7052
},
{
"epoch": 4.254112308564946,
"grad_norm": 6.588546276092529,
"learning_rate": 1.5745887691435057e-05,
"loss": 4.6439,
"step": 7500
},
{
"epoch": 4.537719795802609,
"grad_norm": 6.508044719696045,
"learning_rate": 1.546228020419739e-05,
"loss": 4.5772,
"step": 8000
},
{
"epoch": 4.821327283040272,
"grad_norm": 13.856376647949219,
"learning_rate": 1.5178672716959728e-05,
"loss": 4.5647,
"step": 8500
},
{
"epoch": 5.0,
"eval_runtime": 37.0746,
"eval_samples_per_second": 5.287,
"eval_steps_per_second": 5.287,
"step": 8815
},
{
"epoch": 5.104934770277936,
"grad_norm": 11.172317504882812,
"learning_rate": 1.4895065229722066e-05,
"loss": 4.6058,
"step": 9000
},
{
"epoch": 5.388542257515598,
"grad_norm": 9.843143463134766,
"learning_rate": 1.4611457742484402e-05,
"loss": 4.5818,
"step": 9500
},
{
"epoch": 5.672149744753262,
"grad_norm": 6.524191856384277,
"learning_rate": 1.432785025524674e-05,
"loss": 4.5783,
"step": 10000
},
{
"epoch": 5.955757231990924,
"grad_norm": 13.230880737304688,
"learning_rate": 1.4044242768009078e-05,
"loss": 4.527,
"step": 10500
},
{
"epoch": 6.0,
"eval_runtime": 39.9935,
"eval_samples_per_second": 4.901,
"eval_steps_per_second": 4.901,
"step": 10578
},
{
"epoch": 6.239364719228588,
"grad_norm": 10.95142936706543,
"learning_rate": 1.3760635280771412e-05,
"loss": 4.5873,
"step": 11000
},
{
"epoch": 6.5229722064662505,
"grad_norm": 7.862865924835205,
"learning_rate": 1.347702779353375e-05,
"loss": 4.5154,
"step": 11500
},
{
"epoch": 6.806579693703914,
"grad_norm": 7.10567045211792,
"learning_rate": 1.3193420306296087e-05,
"loss": 4.5425,
"step": 12000
},
{
"epoch": 7.0,
"eval_runtime": 37.9349,
"eval_samples_per_second": 5.167,
"eval_steps_per_second": 5.167,
"step": 12341
},
{
"epoch": 7.090187180941577,
"grad_norm": 8.122684478759766,
"learning_rate": 1.2909812819058425e-05,
"loss": 4.5825,
"step": 12500
},
{
"epoch": 7.37379466817924,
"grad_norm": 10.316974639892578,
"learning_rate": 1.262620533182076e-05,
"loss": 4.5289,
"step": 13000
},
{
"epoch": 7.657402155416903,
"grad_norm": 7.87723445892334,
"learning_rate": 1.2342597844583097e-05,
"loss": 4.4922,
"step": 13500
}
],
"logging_steps": 500,
"max_steps": 35260,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.74196418527232e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
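
The JSON above is the trainer_state.json that the Hugging Face transformers Trainer writes alongside each checkpoint: log_history holds the training loss, gradient norm, and learning rate logged every 500 steps plus the per-epoch eval throughput, and the remaining fields record where training stands (step 13500 of 35260, epoch ~7.66 of 20). Below is a minimal, standalone sketch for inspecting it; the file path is an assumption taken from the folder name in the title, and the linear-decay comparison is an inference from the logged learning-rate values, since the schedule itself is not stored in this file.

# Sketch for inspecting the checkpoint's trainer_state.json (not part of the
# checkpoint itself). Adjust STATE_PATH to wherever the folder actually lives.
import json

STATE_PATH = "yh_codellama/checkpoint-13500/trainer_state.json"  # assumed location

with open(STATE_PATH) as f:
    state = json.load(f)

# Split the log history into training-loss entries and eval entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_runtime" in e]

print(f"logged up to step {state['global_step']} / {state['max_steps']} "
      f"(epoch {state['epoch']:.2f} of {state['num_train_epochs']})")

for e in train_logs:
    # The logged learning rates look consistent with a linear decay from 2e-05
    # to zero over max_steps; recompute that here purely as a sanity check
    # (an inference, not something recorded in trainer_state.json).
    expected_lr = 2e-05 * (1 - e["step"] / state["max_steps"])
    print(f"step {e['step']:>6}  loss {e['loss']:.4f}  "
          f"lr {e['learning_rate']:.3e}  (linear-decay estimate {expected_lr:.3e})")

for e in eval_logs:
    print(f"epoch {e['epoch']:.0f} eval: {e['eval_runtime']:.1f}s, "
          f"{e['eval_samples_per_second']:.2f} samples/s")

For the entries above, the recomputed values agree with the logged learning_rate fields to the printed precision, which is consistent with (but does not prove) a linear schedule starting at 2e-05 and decaying to zero over the 35260 planned steps.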