lora-warewe-7b / checkpoint-400 /trainer_state.json
{
"best_metric": 0.8681809306144714,
"best_model_checkpoint": "./lora-warewe/checkpoint-400",
"epoch": 0.9932490106308683,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2.9999999999999997e-05,
"loss": 1.9531,
"step": 10
},
{
"epoch": 0.05,
"learning_rate": 5.9999999999999995e-05,
"loss": 1.8714,
"step": 20
},
{
"epoch": 0.07,
"learning_rate": 8.999999999999999e-05,
"loss": 1.7052,
"step": 30
},
{
"epoch": 0.1,
"learning_rate": 0.00011999999999999999,
"loss": 1.3201,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 0.00015,
"loss": 1.1551,
"step": 50
},
{
"epoch": 0.15,
"learning_rate": 0.00017999999999999998,
"loss": 1.1199,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.00020999999999999998,
"loss": 1.0598,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00023999999999999998,
"loss": 1.0233,
"step": 80
},
{
"epoch": 0.22,
"learning_rate": 0.00027,
"loss": 1.0086,
"step": 90
},
{
"epoch": 0.25,
"learning_rate": 0.0003,
"loss": 0.999,
"step": 100
},
{
"epoch": 0.27,
"learning_rate": 0.0002972875226039783,
"loss": 0.9961,
"step": 110
},
{
"epoch": 0.3,
"learning_rate": 0.00029457504520795656,
"loss": 0.9831,
"step": 120
},
{
"epoch": 0.32,
"learning_rate": 0.0002918625678119349,
"loss": 0.9637,
"step": 130
},
{
"epoch": 0.35,
"learning_rate": 0.00028915009041591315,
"loss": 0.9546,
"step": 140
},
{
"epoch": 0.37,
"learning_rate": 0.0002864376130198915,
"loss": 0.9575,
"step": 150
},
{
"epoch": 0.4,
"learning_rate": 0.00028372513562386974,
"loss": 0.9337,
"step": 160
},
{
"epoch": 0.42,
"learning_rate": 0.00028101265822784807,
"loss": 0.942,
"step": 170
},
{
"epoch": 0.45,
"learning_rate": 0.0002783001808318264,
"loss": 0.9265,
"step": 180
},
{
"epoch": 0.47,
"learning_rate": 0.0002755877034358047,
"loss": 0.9324,
"step": 190
},
{
"epoch": 0.5,
"learning_rate": 0.000272875226039783,
"loss": 0.9288,
"step": 200
},
{
"epoch": 0.5,
"eval_loss": 0.9210162162780762,
"eval_runtime": 559.4854,
"eval_samples_per_second": 3.575,
"eval_steps_per_second": 0.447,
"step": 200
},
{
"epoch": 0.52,
"learning_rate": 0.00027016274864376125,
"loss": 0.9201,
"step": 210
},
{
"epoch": 0.55,
"learning_rate": 0.00026745027124773957,
"loss": 0.9242,
"step": 220
},
{
"epoch": 0.57,
"learning_rate": 0.0002647377938517179,
"loss": 0.9114,
"step": 230
},
{
"epoch": 0.6,
"learning_rate": 0.00026202531645569616,
"loss": 0.9223,
"step": 240
},
{
"epoch": 0.62,
"learning_rate": 0.0002593128390596745,
"loss": 0.9018,
"step": 250
},
{
"epoch": 0.65,
"learning_rate": 0.0002566003616636528,
"loss": 0.9038,
"step": 260
},
{
"epoch": 0.67,
"learning_rate": 0.0002538878842676311,
"loss": 0.9087,
"step": 270
},
{
"epoch": 0.7,
"learning_rate": 0.0002511754068716094,
"loss": 0.9141,
"step": 280
},
{
"epoch": 0.72,
"learning_rate": 0.00024846292947558766,
"loss": 0.8942,
"step": 290
},
{
"epoch": 0.74,
"learning_rate": 0.000245750452079566,
"loss": 0.8865,
"step": 300
},
{
"epoch": 0.77,
"learning_rate": 0.00024303797468354428,
"loss": 0.8863,
"step": 310
},
{
"epoch": 0.79,
"learning_rate": 0.00024032549728752258,
"loss": 0.8981,
"step": 320
},
{
"epoch": 0.82,
"learning_rate": 0.0002376130198915009,
"loss": 0.8837,
"step": 330
},
{
"epoch": 0.84,
"learning_rate": 0.0002349005424954792,
"loss": 0.8887,
"step": 340
},
{
"epoch": 0.87,
"learning_rate": 0.0002321880650994575,
"loss": 0.8781,
"step": 350
},
{
"epoch": 0.89,
"learning_rate": 0.00022947558770343578,
"loss": 0.891,
"step": 360
},
{
"epoch": 0.92,
"learning_rate": 0.00022676311030741408,
"loss": 0.8852,
"step": 370
},
{
"epoch": 0.94,
"learning_rate": 0.00022405063291139237,
"loss": 0.8736,
"step": 380
},
{
"epoch": 0.97,
"learning_rate": 0.0002213381555153707,
"loss": 0.8799,
"step": 390
},
{
"epoch": 0.99,
"learning_rate": 0.000218625678119349,
"loss": 0.8847,
"step": 400
},
{
"epoch": 0.99,
"eval_loss": 0.8681809306144714,
"eval_runtime": 405.865,
"eval_samples_per_second": 4.928,
"eval_steps_per_second": 0.616,
"step": 400
}
],
"max_steps": 1206,
"num_train_epochs": 3,
"total_flos": 5.0266814048894976e+17,
"trial_name": null,
"trial_params": null
}
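
A minimal sketch of how this log could be inspected, assuming the file above is saved locally as checkpoint-400/trainer_state.json (the path is an assumption) and that matplotlib is installed. It reads log_history and plots the logged training loss together with the two eval_loss points against global_step; none of this is part of the checkpoint itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state shown above (path is assumed).
with open("checkpoint-400/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

plt.plot(*zip(*train), label="train loss")
plt.plot(*zip(*evals), "o--", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")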