{
  "best_metric": 0.47333332896232605,
  "best_model_checkpoint": "./zephyr/05-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.1-KTO_testing kto dataset during training-3_max_steps-145_batch_16_2024-04-05_ppid_9/checkpoint-100",
  "epoch": 0.684931506849315,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 4.223984241485596,
      "kl": 50.472633361816406,
      "learning_rate": 0.00019,
      "logps/chosen": -516.6061401367188,
      "logps/rejected": -730.8901977539062,
      "loss": 0.4891,
      "rewards/chosen": -2.2866313457489014,
      "rewards/margins": 9.197293281555176,
      "rewards/rejected": -11.027228355407715,
      "step": 20
    },
    {
      "epoch": 0.27,
      "grad_norm": 4.9320197105407715,
      "kl": 227.55972290039062,
      "learning_rate": 0.00016142857142857145,
      "logps/chosen": -400.697998046875,
      "logps/rejected": -375.78631591796875,
      "loss": 0.5465,
      "rewards/chosen": 8.534103393554688,
      "rewards/margins": -13.850674629211426,
      "rewards/rejected": 23.049671173095703,
      "step": 40
    },
    {
      "epoch": 0.34,
      "eval_kl": 45.850624084472656,
      "eval_logps/chosen": -710.6041870117188,
      "eval_logps/rejected": -693.1708374023438,
      "eval_loss": 0.5112424492835999,
      "eval_rewards/chosen": -20.341365814208984,
      "eval_rewards/margins": -17.1827449798584,
      "eval_rewards/rejected": -4.146603107452393,
      "eval_runtime": 138.4214,
      "eval_samples_per_second": 2.167,
      "eval_steps_per_second": 0.542,
      "step": 50
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.0030494395177811384,
      "kl": 210.3240966796875,
      "learning_rate": 0.00013285714285714287,
      "logps/chosen": -414.4272155761719,
      "logps/rejected": -540.6943969726562,
      "loss": 0.4697,
      "rewards/chosen": 17.047597885131836,
      "rewards/margins": 2.3560233116149902,
      "rewards/rejected": 15.122901916503906,
      "step": 60
    },
    {
      "epoch": 0.55,
      "grad_norm": 52.698368072509766,
      "kl": 257.76116943359375,
      "learning_rate": 0.0001042857142857143,
      "logps/chosen": -320.7880859375,
      "logps/rejected": -430.06829833984375,
      "loss": 0.5455,
      "rewards/chosen": 19.29902458190918,
      "rewards/margins": -8.448366165161133,
      "rewards/rejected": 28.257070541381836,
      "step": 80
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.0,
      "kl": 40.73902130126953,
      "learning_rate": 7.571428571428571e-05,
      "logps/chosen": -1188.35693359375,
      "logps/rejected": -1296.0433349609375,
      "loss": 0.4745,
      "rewards/chosen": -56.30565643310547,
      "rewards/margins": 8.954660415649414,
      "rewards/rejected": -68.27855682373047,
      "step": 100
    },
    {
      "epoch": 0.68,
      "eval_kl": 0.0,
      "eval_logps/chosen": -2394.681884765625,
      "eval_logps/rejected": -2418.263916015625,
      "eval_loss": 0.47333332896232605,
      "eval_rewards/chosen": -188.7490997314453,
      "eval_rewards/margins": -12.401326179504395,
      "eval_rewards/rejected": -176.65597534179688,
      "eval_runtime": 138.4851,
      "eval_samples_per_second": 2.166,
      "eval_steps_per_second": 0.542,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 145,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}