vntl-13b-v0.2-qlora/checkpoint-50/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11622501162250116,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 7.222222222222222e-05,
"loss": 2.2776,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.00014444444444444444,
"loss": 2.2089,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 0.00021666666666666666,
"loss": 2.2124,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 0.0002888888888888889,
"loss": 2.0872,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 0.0003611111111111111,
"loss": 1.9608,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 0.0004333333333333333,
"loss": 1.9254,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 0.0005055555555555555,
"loss": 1.8716,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 0.0005777777777777778,
"loss": 1.8424,
"step": 8
},
{
"epoch": 0.02,
"learning_rate": 0.00065,
"loss": 1.8139,
"step": 9
},
{
"epoch": 0.02,
"learning_rate": 0.0006499909512851264,
"loss": 1.8298,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 0.0006499638056443784,
"loss": 1.735,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 0.0006499185645893443,
"loss": 1.707,
"step": 12
},
{
"epoch": 0.03,
"learning_rate": 0.0006498552306392452,
"loss": 1.7144,
"step": 13
},
{
"epoch": 0.03,
"learning_rate": 0.0006497738073207941,
"loss": 1.6774,
"step": 14
},
{
"epoch": 0.03,
"learning_rate": 0.0006496742991679994,
"loss": 1.6373,
"step": 15
},
{
"epoch": 0.04,
"learning_rate": 0.0006495567117219131,
"loss": 1.5874,
"step": 16
},
{
"epoch": 0.04,
"learning_rate": 0.0006494210515303213,
"loss": 1.5767,
"step": 17
},
{
"epoch": 0.04,
"learning_rate": 0.0006492673261473803,
"loss": 1.3921,
"step": 18
},
{
"epoch": 0.04,
"learning_rate": 0.0006490955441331957,
"loss": 1.2696,
"step": 19
},
{
"epoch": 0.05,
"learning_rate": 0.0006489057150533456,
"loss": 1.065,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 0.0006486978494783486,
"loss": 0.9967,
"step": 21
},
{
"epoch": 0.05,
"learning_rate": 0.0006484719589830741,
"loss": 0.8554,
"step": 22
},
{
"epoch": 0.05,
"learning_rate": 0.0006482280561460987,
"loss": 0.8944,
"step": 23
},
{
"epoch": 0.06,
"learning_rate": 0.0006479661545490054,
"loss": 0.8563,
"step": 24
},
{
"epoch": 0.06,
"learning_rate": 0.000647686268775627,
"loss": 0.9036,
"step": 25
},
{
"epoch": 0.06,
"eval_loss": 0.782578706741333,
"eval_runtime": 47.3187,
"eval_samples_per_second": 2.113,
"eval_steps_per_second": 0.528,
"step": 25
},
{
"epoch": 0.06,
"learning_rate": 0.0006473884144112352,
"loss": 0.8622,
"step": 26
},
{
"epoch": 0.06,
"learning_rate": 0.0006470726080416708,
"loss": 0.8543,
"step": 27
},
{
"epoch": 0.07,
"learning_rate": 0.000646738867252422,
"loss": 0.821,
"step": 28
},
{
"epoch": 0.07,
"learning_rate": 0.0006463872106276441,
"loss": 0.798,
"step": 29
},
{
"epoch": 0.07,
"learning_rate": 0.0006460176577491251,
"loss": 0.7795,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 0.0006456302291951948,
"loss": 0.845,
"step": 31
},
{
"epoch": 0.07,
"learning_rate": 0.0006452249465395796,
"loss": 0.8199,
"step": 32
},
{
"epoch": 0.08,
"learning_rate": 0.0006448018323502008,
"loss": 0.8371,
"step": 33
},
{
"epoch": 0.08,
"learning_rate": 0.0006443609101879176,
"loss": 0.8235,
"step": 34
},
{
"epoch": 0.08,
"learning_rate": 0.0006439022046052159,
"loss": 0.8063,
"step": 35
},
{
"epoch": 0.08,
"learning_rate": 0.0006434257411448404,
"loss": 0.8153,
"step": 36
},
{
"epoch": 0.09,
"learning_rate": 0.0006429315463383726,
"loss": 0.8219,
"step": 37
},
{
"epoch": 0.09,
"learning_rate": 0.0006424196477047534,
"loss": 0.8167,
"step": 38
},
{
"epoch": 0.09,
"learning_rate": 0.0006418900737487508,
"loss": 0.7766,
"step": 39
},
{
"epoch": 0.09,
"learning_rate": 0.0006413428539593724,
"loss": 0.8465,
"step": 40
},
{
"epoch": 0.1,
"learning_rate": 0.0006407780188082232,
"loss": 0.7966,
"step": 41
},
{
"epoch": 0.1,
"learning_rate": 0.0006401955997478094,
"loss": 0.8225,
"step": 42
},
{
"epoch": 0.1,
"learning_rate": 0.0006395956292097865,
"loss": 0.7915,
"step": 43
},
{
"epoch": 0.1,
"learning_rate": 0.0006389781406031534,
"loss": 0.7923,
"step": 44
},
{
"epoch": 0.1,
"learning_rate": 0.0006383431683123921,
"loss": 0.8051,
"step": 45
},
{
"epoch": 0.11,
"learning_rate": 0.0006376907476955534,
"loss": 0.8127,
"step": 46
},
{
"epoch": 0.11,
"learning_rate": 0.000637020915082287,
"loss": 0.7747,
"step": 47
},
{
"epoch": 0.11,
"learning_rate": 0.00063633370777182,
"loss": 0.797,
"step": 48
},
{
"epoch": 0.11,
"learning_rate": 0.0006356291640308783,
"loss": 0.743,
"step": 49
},
{
"epoch": 0.12,
"learning_rate": 0.000634907323091557,
"loss": 0.8201,
"step": 50
},
{
"epoch": 0.12,
"eval_loss": 0.7397407293319702,
"eval_runtime": 47.3134,
"eval_samples_per_second": 2.114,
"eval_steps_per_second": 0.528,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 430,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"total_flos": 1.2212033932689408e+17,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
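
The log_history array above mixes per-step training entries (keyed by "loss") with evaluation entries (keyed by "eval_loss") emitted every eval_steps = 25 steps. As a minimal sketch of how this checkpoint state can be inspected, assuming the JSON is saved locally as trainer_state.json (the filename and path are illustrative, not part of the checkpoint), the snippet below separates the two kinds of entries and prints the loss trend over the first 50 steps:

# Minimal sketch: summarize the Trainer state shown above.
# Assumes the JSON has been saved locally as "trainer_state.json".
import json

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']} / max_steps: {state['max_steps']}")
print(f"first train loss: {train_logs[0]['loss']:.4f} (step {train_logs[0]['step']})")
print(f"last train loss:  {train_logs[-1]['loss']:.4f} (step {train_logs[-1]['step']})")
for e in eval_logs:
    print(f"eval_loss at step {e['step']}: {e['eval_loss']:.4f}")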