{
  "best_metric": 0.47333332896232605,
  "best_model_checkpoint": "./zephyr/05-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.1-KTO_testing kto dataset during training-3_max_steps-145_batch_16_2024-04-05_ppid_9/checkpoint-100",
  "epoch": 0.684931506849315,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 32.5521125793457,
      "kl": 6.568634033203125,
      "learning_rate": 0.00018285714285714286,
      "logps/chosen": -309.11578369140625,
      "logps/rejected": -330.8851013183594,
      "loss": 0.4463,
      "rewards/chosen": -1.5149470567703247,
      "rewards/margins": 0.730856716632843,
      "rewards/rejected": -2.1397347450256348,
      "step": 20
    },
    {
      "epoch": 0.27,
      "grad_norm": 5.194253444671631,
      "kl": 9.558149337768555,
      "learning_rate": 0.00015571428571428572,
      "logps/chosen": -317.7120666503906,
      "logps/rejected": -336.9992980957031,
      "loss": 0.4204,
      "rewards/chosen": -0.900913655757904,
      "rewards/margins": 1.870524287223816,
      "rewards/rejected": -2.7778050899505615,
      "step": 40
    },
    {
      "epoch": 0.34,
      "eval_kl": 0.0,
      "eval_logps/chosen": -471.27984619140625,
      "eval_logps/rejected": -470.8447570800781,
      "eval_loss": 0.47356343269348145,
      "eval_rewards/chosen": -17.10463523864746,
      "eval_rewards/margins": 1.9298614263534546,
      "eval_rewards/rejected": -19.51471710205078,
      "eval_runtime": 137.637,
      "eval_samples_per_second": 2.18,
      "eval_steps_per_second": 0.545,
      "step": 50
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.0,
      "kl": 0.2518434524536133,
      "learning_rate": 0.00012714285714285714,
      "logps/chosen": -638.1253051757812,
      "logps/rejected": -641.34716796875,
      "loss": 0.402,
      "rewards/chosen": -33.93111038208008,
      "rewards/margins": 1.04580819606781,
      "rewards/rejected": -34.0406379699707,
      "step": 60
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.0,
      "kl": 0.0,
      "learning_rate": 9.857142857142858e-05,
      "logps/chosen": -1467.325927734375,
      "logps/rejected": -1537.5704345703125,
      "loss": 0.45,
      "rewards/chosen": -117.26831817626953,
      "rewards/margins": 9.93100357055664,
      "rewards/rejected": -124.3635025024414,
      "step": 80
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.0,
      "kl": 0.0,
      "learning_rate": 7e-05,
      "logps/chosen": -2025.484375,
      "logps/rejected": -1882.913818359375,
      "loss": 0.4813,
      "rewards/chosen": -169.48184204101562,
      "rewards/margins": -10.249415397644043,
      "rewards/rejected": -157.5347900390625,
      "step": 100
    },
    {
      "epoch": 0.68,
      "eval_kl": 0.0,
      "eval_logps/chosen": -1703.4486083984375,
      "eval_logps/rejected": -1636.100341796875,
      "eval_loss": 0.47333332896232605,
      "eval_rewards/chosen": -140.32151794433594,
      "eval_rewards/margins": -6.890669822692871,
      "eval_rewards/rejected": -136.040283203125,
      "eval_runtime": 137.631,
      "eval_samples_per_second": 2.18,
      "eval_steps_per_second": 0.545,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 145,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}