{
"best_metric": 0.275594562292099,
"best_model_checkpoint": "./mistral/20-04-24-Weni-WeniGPT-Agents-Mistral-1.0.6-SFT-1.0.8-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-180_batch_8_2024-04-20_ppid_9/checkpoint-90",
"epoch": 2.903225806451613,
"eval_steps": 30,
"global_step": 90,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.32,
"grad_norm": 27.100269317626953,
"learning_rate": 5e-06,
"logits/chosen": -1.7583906650543213,
"logits/rejected": -1.8312015533447266,
"logps/chosen": -173.15086364746094,
"logps/rejected": -269.08062744140625,
"loss": 0.69,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": 0.01006038673222065,
"rewards/margins": 0.011446094140410423,
"rewards/rejected": -0.0013857081066817045,
"step": 10
},
{
"epoch": 0.65,
            "grad_norm": null,
"learning_rate": 4.741379310344828e-06,
"logits/chosen": -1.7850843667984009,
"logits/rejected": -1.81415593624115,
"logps/chosen": -196.91897583007812,
"logps/rejected": -205.7848663330078,
"loss": 0.6264,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": 0.17185011506080627,
"rewards/margins": 0.14982673525810242,
"rewards/rejected": 0.022023344412446022,
"step": 20
},
{
"epoch": 0.97,
"grad_norm": 23.862794876098633,
"learning_rate": 4.454022988505747e-06,
"logits/chosen": -1.7745271921157837,
"logits/rejected": -1.8100026845932007,
"logps/chosen": -197.96493530273438,
"logps/rejected": -184.85069274902344,
"loss": 0.5477,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.3463248908519745,
"rewards/margins": 0.35388293862342834,
"rewards/rejected": -0.007558024022728205,
"step": 30
},
{
"epoch": 0.97,
"eval_logits/chosen": -1.7948826551437378,
"eval_logits/rejected": -1.8862664699554443,
"eval_logps/chosen": -107.27352142333984,
"eval_logps/rejected": -269.4668273925781,
"eval_loss": 0.48426297307014465,
"eval_rewards/accuracies": 0.8571428656578064,
"eval_rewards/chosen": 0.4718170166015625,
"eval_rewards/margins": 0.5826946496963501,
"eval_rewards/rejected": -0.1108776405453682,
"eval_runtime": 9.9214,
"eval_samples_per_second": 2.822,
"eval_steps_per_second": 0.706,
"step": 30
},
{
"epoch": 1.29,
"grad_norm": 33.898590087890625,
"learning_rate": 4.166666666666667e-06,
"logits/chosen": -1.7890510559082031,
"logits/rejected": -1.8475368022918701,
"logps/chosen": -193.50033569335938,
"logps/rejected": -206.31741333007812,
"loss": 0.4661,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.4587056636810303,
"rewards/margins": 0.5034217238426208,
"rewards/rejected": -0.04471604526042938,
"step": 40
},
{
"epoch": 1.61,
"grad_norm": 29.36012840270996,
"learning_rate": 3.8793103448275865e-06,
"logits/chosen": -1.804456353187561,
"logits/rejected": -1.8586517572402954,
"logps/chosen": -136.4575958251953,
"logps/rejected": -212.65231323242188,
"loss": 0.4418,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.8421177864074707,
"rewards/margins": 0.988998293876648,
"rewards/rejected": -0.146880641579628,
"step": 50
},
{
"epoch": 1.94,
"grad_norm": 25.15074348449707,
"learning_rate": 3.5919540229885056e-06,
"logits/chosen": -1.8009055852890015,
"logits/rejected": -1.8550602197647095,
"logps/chosen": -145.14044189453125,
"logps/rejected": -265.5760192871094,
"loss": 0.3542,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.742743730545044,
"rewards/margins": 1.1478015184402466,
"rewards/rejected": -0.4050576686859131,
"step": 60
},
{
"epoch": 1.94,
"eval_logits/chosen": -1.7979233264923096,
"eval_logits/rejected": -1.8902829885482788,
"eval_logps/chosen": -105.64314270019531,
"eval_logps/rejected": -269.93597412109375,
"eval_loss": 0.344027042388916,
"eval_rewards/accuracies": 1.0,
"eval_rewards/chosen": 0.96092689037323,
"eval_rewards/margins": 1.212537407875061,
"eval_rewards/rejected": -0.2516104578971863,
"eval_runtime": 9.9306,
"eval_samples_per_second": 2.82,
"eval_steps_per_second": 0.705,
"step": 60
},
{
"epoch": 2.26,
"grad_norm": 16.264854431152344,
"learning_rate": 3.3045977011494256e-06,
"logits/chosen": -1.8283793926239014,
"logits/rejected": -1.8581088781356812,
"logps/chosen": -245.49520874023438,
"logps/rejected": -240.85055541992188,
"loss": 0.3242,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": 0.9433773756027222,
"rewards/margins": 0.9048371315002441,
"rewards/rejected": 0.0385403148829937,
"step": 70
},
{
"epoch": 2.58,
"grad_norm": 18.855249404907227,
"learning_rate": 3.017241379310345e-06,
"logits/chosen": -1.912719964981079,
"logits/rejected": -1.9267578125,
"logps/chosen": -184.6862030029297,
"logps/rejected": -150.09349060058594,
"loss": 0.3063,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 0.9958856701850891,
"rewards/margins": 1.2241456508636475,
"rewards/rejected": -0.2282601296901703,
"step": 80
},
{
"epoch": 2.9,
"grad_norm": 18.644786834716797,
"learning_rate": 2.729885057471265e-06,
"logits/chosen": -1.8041893243789673,
"logits/rejected": -1.8382984399795532,
"logps/chosen": -122.60682678222656,
"logps/rejected": -190.001220703125,
"loss": 0.2892,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": 1.1825361251831055,
"rewards/margins": 1.4003632068634033,
"rewards/rejected": -0.2178269922733307,
"step": 90
},
{
"epoch": 2.9,
"eval_logits/chosen": -1.799451470375061,
"eval_logits/rejected": -1.8927640914916992,
"eval_logps/chosen": -104.44667053222656,
"eval_logps/rejected": -270.4913635253906,
"eval_loss": 0.275594562292099,
"eval_rewards/accuracies": 1.0,
"eval_rewards/chosen": 1.319871187210083,
"eval_rewards/margins": 1.7381019592285156,
"eval_rewards/rejected": -0.4182307720184326,
"eval_runtime": 9.9268,
"eval_samples_per_second": 2.821,
"eval_steps_per_second": 0.705,
"step": 90
}
],
"logging_steps": 10,
"max_steps": 180,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 90,
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}