{
"best_metric": 0.027495555579662323,
"best_model_checkpoint": "./Zephyr/27-03-24-Weni-kto-test_WeniGPT Experiment using KTO trainer with no collator-2_max_steps-786_batch_16_2024-03-27_ppid_1885/checkpoint-100",
"epoch": 0.7619047619047619,
"eval_steps": 50,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"grad_norm": 2.2224462032318115,
"kl": 0.054589949548244476,
"learning_rate": 0.0001666666666666667,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 704.2891,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 20
},
{
"epoch": 0.3,
"grad_norm": 2.9781014919281006,
"kl": 0.014553820714354515,
"learning_rate": 0.00019580052493438322,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 371.0873,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 40
},
{
"epoch": 0.38,
"eval_kl": 0.014955863356590271,
"eval_logps/chosen": -139.6813507080078,
"eval_logps/rejected": -288.11480712890625,
"eval_loss": 0.0440419502556324,
"eval_rewards/chosen": 4.680752754211426,
"eval_rewards/margins": 14.199661254882812,
"eval_rewards/rejected": -9.518909454345703,
"eval_runtime": 437.1874,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.268,
"step": 50
},
{
"epoch": 0.46,
"grad_norm": 0.7002557516098022,
"kl": 0.36391472816467285,
"learning_rate": 0.0001905511811023622,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 165.1605,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 60
},
{
"epoch": 0.61,
"grad_norm": 0.5644651651382446,
"kl": 0.01063810009509325,
"learning_rate": 0.00018556430446194227,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 89.197,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 80
},
{
"epoch": 0.76,
"grad_norm": 0.18941974639892578,
"kl": 0.0,
"learning_rate": 0.0001805774278215223,
"logps/chosen": NaN,
"logps/rejected": NaN,
"loss": 57.9834,
"rewards/chosen": NaN,
"rewards/margins": NaN,
"rewards/rejected": NaN,
"step": 100
},
{
"epoch": 0.76,
"eval_kl": 0.0,
"eval_logps/chosen": -135.0946807861328,
"eval_logps/rejected": -511.870361328125,
"eval_loss": 0.027495555579662323,
"eval_rewards/chosen": 5.139419078826904,
"eval_rewards/margins": 37.033878326416016,
"eval_rewards/rejected": -31.89446258544922,
"eval_runtime": 437.2677,
"eval_samples_per_second": 1.07,
"eval_steps_per_second": 0.268,
"step": 100
}
],
"logging_steps": 20,
"max_steps": 786,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}