{
"best_metric": 0.4368760883808136,
"best_model_checkpoint": "./mistral/20-04-24-Weni-WeniGPT-Agents-Mistral-1.0.6-SFT-1.0.6-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-90_batch_16_2024-04-20_ppid_9/checkpoint-90",
"epoch": 5.806451612903226,
"eval_steps": 30,
"global_step": 90,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.65,
"grad_norm": 16.76080894470215,
"learning_rate": 4.770114942528735e-06,
"logits/chosen": -1.8055893182754517,
"logits/rejected": -1.828260064125061,
"logps/chosen": -46.100120544433594,
"logps/rejected": -82.48685455322266,
"loss": 0.6795,
"rewards/accuracies": 0.25,
"rewards/chosen": 0.04209427908062935,
"rewards/margins": 0.04848780483007431,
"rewards/rejected": -0.006393528077751398,
"step": 10
},
{
"epoch": 1.29,
"grad_norm": 11.445436477661133,
"learning_rate": 4.1954022988505746e-06,
"logits/chosen": -1.8350013494491577,
"logits/rejected": -1.8602994680404663,
"logps/chosen": -37.913516998291016,
"logps/rejected": -67.64692687988281,
"loss": 0.6251,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.15703639388084412,
"rewards/margins": 0.1911666840314865,
"rewards/rejected": -0.0341302864253521,
"step": 20
},
{
"epoch": 1.94,
"grad_norm": 11.545478820800781,
"learning_rate": 3.620689655172414e-06,
"logits/chosen": -1.8373997211456299,
"logits/rejected": -1.847825050354004,
"logps/chosen": -29.99410057067871,
"logps/rejected": -59.1256217956543,
"loss": 0.5772,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": 0.27005138993263245,
"rewards/margins": 0.30424270033836365,
"rewards/rejected": -0.0341913141310215,
"step": 30
},
{
"epoch": 1.94,
"eval_logits/chosen": -1.81316077709198,
"eval_logits/rejected": -1.8455785512924194,
"eval_logps/chosen": -36.48888397216797,
"eval_logps/rejected": -55.34504699707031,
"eval_loss": 0.5171023011207581,
"eval_rewards/accuracies": 0.5,
"eval_rewards/chosen": 0.5191236138343811,
"eval_rewards/margins": 0.5845221877098083,
"eval_rewards/rejected": -0.06539855152368546,
"eval_runtime": 2.4917,
"eval_samples_per_second": 11.237,
"eval_steps_per_second": 1.605,
"step": 30
},
{
"epoch": 2.58,
"grad_norm": 7.317459583282471,
"learning_rate": 3.0459770114942533e-06,
"logits/chosen": -1.7586491107940674,
"logits/rejected": -1.7903881072998047,
"logps/chosen": -52.9969367980957,
"logps/rejected": -87.54074096679688,
"loss": 0.5181,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.6470302939414978,
"rewards/margins": 0.7092825174331665,
"rewards/rejected": -0.06225220113992691,
"step": 40
},
{
"epoch": 3.23,
"grad_norm": 5.132155418395996,
"learning_rate": 2.471264367816092e-06,
"logits/chosen": -1.8951135873794556,
"logits/rejected": -1.9210799932479858,
"logps/chosen": -34.33287811279297,
"logps/rejected": -68.03052520751953,
"loss": 0.5415,
"rewards/accuracies": 0.32499998807907104,
"rewards/chosen": 0.5781847238540649,
"rewards/margins": 0.6501666903495789,
"rewards/rejected": -0.07198190689086914,
"step": 50
},
{
"epoch": 3.87,
"grad_norm": 7.053220748901367,
"learning_rate": 1.896551724137931e-06,
"logits/chosen": -1.9114513397216797,
"logits/rejected": -1.9271589517593384,
"logps/chosen": -25.962200164794922,
"logps/rejected": -48.20077133178711,
"loss": 0.5125,
"rewards/accuracies": 0.25,
"rewards/chosen": 0.47203439474105835,
"rewards/margins": 0.5603113174438477,
"rewards/rejected": -0.08827687054872513,
"step": 60
},
{
"epoch": 3.87,
"eval_logits/chosen": -1.817034363746643,
"eval_logits/rejected": -1.8500827550888062,
"eval_logps/chosen": -35.225791931152344,
"eval_logps/rejected": -55.629276275634766,
"eval_loss": 0.4517291486263275,
"eval_rewards/accuracies": 0.5,
"eval_rewards/chosen": 0.8980507254600525,
"eval_rewards/margins": 1.048719048500061,
"eval_rewards/rejected": -0.15066833794116974,
"eval_runtime": 2.4913,
"eval_samples_per_second": 11.239,
"eval_steps_per_second": 1.606,
"step": 60
},
{
"epoch": 4.52,
"grad_norm": 3.6152443885803223,
"learning_rate": 1.3218390804597702e-06,
"logits/chosen": -1.8332040309906006,
"logits/rejected": -1.8538074493408203,
"logps/chosen": -34.457115173339844,
"logps/rejected": -72.48670959472656,
"loss": 0.4838,
"rewards/accuracies": 0.375,
"rewards/chosen": 0.6265449523925781,
"rewards/margins": 0.7487497329711914,
"rewards/rejected": -0.12220476567745209,
"step": 70
},
{
"epoch": 5.16,
"grad_norm": 3.271343231201172,
"learning_rate": 7.471264367816093e-07,
"logits/chosen": -1.7745481729507446,
"logits/rejected": -1.784841537475586,
"logps/chosen": -52.409339904785156,
"logps/rejected": -76.24700164794922,
"loss": 0.4665,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.939862847328186,
"rewards/margins": 1.0275475978851318,
"rewards/rejected": -0.08768495172262192,
"step": 80
},
{
"epoch": 5.81,
"grad_norm": 2.297419786453247,
"learning_rate": 1.7241379310344828e-07,
"logits/chosen": -1.8448705673217773,
"logits/rejected": -1.8637123107910156,
"logps/chosen": -33.81181335449219,
"logps/rejected": -63.00860595703125,
"loss": 0.491,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": 0.6832905411720276,
"rewards/margins": 0.8434805870056152,
"rewards/rejected": -0.1601901352405548,
"step": 90
},
{
"epoch": 5.81,
"eval_logits/chosen": -1.8184903860092163,
"eval_logits/rejected": -1.8517436981201172,
"eval_logps/chosen": -34.81117630004883,
"eval_logps/rejected": -55.731048583984375,
"eval_loss": 0.4368760883808136,
"eval_rewards/accuracies": 0.5,
"eval_rewards/chosen": 1.0224344730377197,
"eval_rewards/margins": 1.2036339044570923,
"eval_rewards/rejected": -0.18119940161705017,
"eval_runtime": 2.4928,
"eval_samples_per_second": 11.232,
"eval_steps_per_second": 1.605,
"step": 90
}
],
"logging_steps": 10,
"max_steps": 90,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 90,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}