{
  "best_metric": 0.47333332896232605,
  "best_model_checkpoint": "./zephyr/08-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.14-KTO_Hyperparameter search, altering lora params for KTO task.-2_max_steps-145_batch_16_2024-04-08_ppid_9/checkpoint-100",
  "epoch": 0.684931506849315,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 0.0,
      "learning_rate": 0.0001785714285714286,
      "loss": 0.4159,
      "step": 20,
      "train/kl": 0.052512504160404205,
      "train/logps/chosen": -1660.366455078125,
      "train/logps/rejected": -1587.2052408854167,
      "train/rewards/chosen": -135.80235290527344,
      "train/rewards/margins": -6.56842549641928,
      "train/rewards/rejected": -129.23392740885416
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.0,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.45,
      "step": 40,
      "train/kl": 0.0,
      "train/logps/chosen": -4467.318576388889,
      "train/logps/rejected": -4453.0703125,
      "train/rewards/chosen": -420.4342447916667,
      "train/rewards/margins": -3.8474638967803116,
      "train/rewards/rejected": -416.5867808948864
    },
    {
      "epoch": 0.34,
      "eval/kl": 0.0,
      "eval/logps/chosen": -5048.772007042254,
      "eval/logps/rejected": -4424.3093354430375,
      "eval/rewards/chosen": -476.51050836267603,
      "eval/rewards/margins": -60.5168869228026,
      "eval/rewards/rejected": -415.99362143987344,
      "eval_loss": 0.47333332896232605,
      "eval_runtime": 139.068,
      "eval_samples_per_second": 2.157,
      "eval_steps_per_second": 0.539,
      "step": 50
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.0,
      "learning_rate": 0.00012142857142857143,
      "loss": 0.4969,
      "step": 60,
      "train/kl": 0.0,
      "train/logps/chosen": -5034.042845911949,
      "train/logps/rejected": -5042.78998447205,
      "train/rewards/chosen": -474.4616745283019,
      "train/rewards/margins": -0.15693850345718374,
      "train/rewards/rejected": -474.3047360248447
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.0,
      "learning_rate": 9.285714285714286e-05,
      "loss": 0.5281,
      "step": 80,
      "train/kl": 0.0,
      "train/logps/chosen": -5164.465606508876,
      "train/logps/rejected": -4831.312086092716,
      "train/rewards/chosen": -487.13577107988164,
      "train/rewards/margins": -31.98345319908691,
      "train/rewards/rejected": -455.1523178807947
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.0,
      "learning_rate": 6.428571428571429e-05,
      "loss": 0.4813,
      "step": 100,
      "train/kl": 0.0,
      "train/logps/chosen": -5274.45413961039,
      "train/logps/rejected": -4860.5839608433735,
      "train/rewards/chosen": -499.15376420454544,
      "train/rewards/margins": -40.16430637322014,
      "train/rewards/rejected": -458.9894578313253
    },
    {
      "epoch": 0.68,
      "eval/kl": 0.0,
      "eval/logps/chosen": -5079.107394366197,
      "eval/logps/rejected": -4448.600079113924,
      "eval/rewards/chosen": -479.54401408450707,
      "eval/rewards/margins": -61.12139739146909,
      "eval/rewards/rejected": -418.422616693038,
      "eval_loss": 0.47333332896232605,
      "eval_runtime": 139.0173,
      "eval_samples_per_second": 2.158,
      "eval_steps_per_second": 0.54,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 145,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}