{
  "best_metric": 0.3619329333305359,
  "best_model_checkpoint": "./zephyr/08-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.17-KTO_Hyperparameter search, altering lora params for KTO task.-2_max_steps-145_batch_16_2024-04-08_ppid_9/checkpoint-100",
  "epoch": 0.684931506849315,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "grad_norm": 6.565266132354736,
      "learning_rate": 0.00018142857142857142,
      "loss": 0.3847,
      "step": 20,
      "train/kl": 3.3989148139953613,
      "train/logps/chosen": -298.30405479753523,
      "train/logps/rejected": -317.74027826544943,
      "train/rewards/chosen": 0.4325446276597574,
      "train/rewards/margins": 3.3660581404318783,
      "train/rewards/rejected": -2.933513512772121
    },
    {
      "epoch": 0.27,
      "grad_norm": 2.64931583404541,
      "learning_rate": 0.00015285714285714287,
      "loss": 0.4039,
      "step": 40,
      "train/kl": 0.9900484085083008,
      "train/logps/chosen": -305.07652368012424,
      "train/logps/rejected": -347.4229805424528,
      "train/rewards/chosen": -2.13509221995099,
      "train/rewards/margins": 5.288080256955457,
      "train/rewards/rejected": -7.423172476906447
    },
    {
      "epoch": 0.34,
      "eval/kl": 0.7344650030136108,
      "eval/logps/chosen": -329.9394806338028,
      "eval/logps/rejected": -444.9535700158228,
      "eval/rewards/chosen": -4.62774013465559,
      "eval/rewards/margins": 13.430626471851532,
      "eval/rewards/rejected": -18.05836660650712,
      "eval_loss": 0.37672173976898193,
      "eval_runtime": 140.2349,
      "eval_samples_per_second": 2.139,
      "eval_steps_per_second": 0.535,
      "step": 50
    },
    {
      "epoch": 0.41,
      "grad_norm": 8.288690567016602,
      "learning_rate": 0.00012714285714285714,
      "loss": 0.3602,
      "step": 60,
      "train/kl": 2.266563653945923,
      "train/logps/chosen": -318.9802876655629,
      "train/logps/rejected": -473.3688517011834,
      "train/rewards/chosen": -3.7534725366049253,
      "train/rewards/margins": 16.50871138073975,
      "train/rewards/rejected": -20.262183917344675
    },
    {
      "epoch": 0.55,
      "grad_norm": 4.295706748962402,
      "learning_rate": 9.857142857142858e-05,
      "loss": 0.3496,
      "step": 80,
      "train/kl": 0.8752914667129517,
      "train/logps/chosen": -335.9194670376712,
      "train/logps/rejected": -405.97503591954023,
      "train/rewards/chosen": -3.1786450947800726,
      "train/rewards/margins": 7.811195007141622,
      "train/rewards/rejected": -10.989840101921695
    },
    {
      "epoch": 0.68,
      "grad_norm": 5.850632667541504,
      "learning_rate": 7e-05,
      "loss": 0.302,
      "step": 100,
      "train/kl": 0.4592212736606598,
      "train/logps/chosen": -277.03286903782896,
      "train/logps/rejected": -392.9672154017857,
      "train/rewards/chosen": 0.3360620799817537,
      "train/rewards/margins": 9.165781260134283,
      "train/rewards/rejected": -8.82971918015253
    },
    {
      "epoch": 0.68,
      "eval/kl": 3.9467480182647705,
      "eval/logps/chosen": -265.8084286971831,
      "eval/logps/rejected": -297.73909711234177,
      "eval/rewards/chosen": 1.7853647151463468,
      "eval/rewards/margins": 5.122287276212407,
      "eval/rewards/rejected": -3.3369225610660602,
      "eval_loss": 0.3619329333305359,
      "eval_runtime": 140.261,
      "eval_samples_per_second": 2.139,
      "eval_steps_per_second": 0.535,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 145,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}