{
  "best_metric": 0.040502406656742096,
  "best_model_checkpoint": "./mistral/21-04-24-Weni-WeniGPT-Agents-Mistral-1.0.0-SFT-1.0.16-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-180_batch_8_2024-04-21_ppid_9/checkpoint-180",
  "epoch": 5.806451612903226,
  "eval_steps": 30,
  "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3225806451612903,
      "grad_norm": 23.757038116455078,
      "learning_rate": 4.971264367816092e-06,
      "logits/chosen": -1.8281418085098267,
      "logits/rejected": -1.8474775552749634,
      "logps/chosen": -128.32241821289062,
      "logps/rejected": -168.0380401611328,
      "loss": 0.6742,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.024183962494134903,
      "rewards/margins": 0.03792798891663551,
      "rewards/rejected": -0.013744029216468334,
      "step": 10
    },
    {
      "epoch": 0.6451612903225806,
      "grad_norm": 14.753284454345703,
      "learning_rate": 4.683908045977012e-06,
      "logits/chosen": -1.7773618698120117,
      "logits/rejected": -1.780083417892456,
      "logps/chosen": -144.4004364013672,
      "logps/rejected": -171.8882598876953,
      "loss": 0.5367,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.282940536737442,
      "rewards/margins": 0.31420594453811646,
      "rewards/rejected": -0.03126540407538414,
      "step": 20
    },
    {
      "epoch": 0.967741935483871,
      "grad_norm": 13.17676067352295,
      "learning_rate": 4.396551724137931e-06,
      "logits/chosen": -1.8475195169448853,
      "logits/rejected": -1.8598411083221436,
      "logps/chosen": -166.5201416015625,
      "logps/rejected": -198.87669372558594,
      "loss": 0.3994,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.7265206575393677,
      "rewards/margins": 1.12095308303833,
      "rewards/rejected": -0.3944324254989624,
      "step": 30
    },
    {
      "epoch": 0.967741935483871,
      "eval_logits/chosen": -1.7065293788909912,
      "eval_logits/rejected": -1.7336915731430054,
      "eval_logps/chosen": -112.41703033447266,
      "eval_logps/rejected": -210.2833709716797,
      "eval_loss": 0.339741975069046,
      "eval_rewards/accuracies": 0.7142857313156128,
      "eval_rewards/chosen": 0.7506826519966125,
      "eval_rewards/margins": 0.770815372467041,
      "eval_rewards/rejected": -0.020132770761847496,
      "eval_runtime": 10.1682,
      "eval_samples_per_second": 2.754,
      "eval_steps_per_second": 0.688,
      "step": 30
    },
    {
      "epoch": 1.2903225806451613,
      "grad_norm": 9.547523498535156,
      "learning_rate": 4.1091954022988515e-06,
      "logits/chosen": -1.7585010528564453,
      "logits/rejected": -1.7726383209228516,
      "logps/chosen": -123.3655014038086,
      "logps/rejected": -181.35037231445312,
      "loss": 0.2655,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.981993556022644,
      "rewards/margins": 1.3875294923782349,
      "rewards/rejected": -0.4055357575416565,
      "step": 40
    },
    {
      "epoch": 1.6129032258064515,
      "grad_norm": 6.552857398986816,
      "learning_rate": 3.82183908045977e-06,
      "logits/chosen": -1.7593097686767578,
      "logits/rejected": -1.7726491689682007,
      "logps/chosen": -79.5435562133789,
      "logps/rejected": -169.55801391601562,
      "loss": 0.2323,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.1555945873260498,
      "rewards/margins": 1.6152770519256592,
      "rewards/rejected": -0.45968255400657654,
      "step": 50
    },
    {
      "epoch": 1.935483870967742,
      "grad_norm": 5.082254409790039,
      "learning_rate": 3.5344827586206898e-06,
      "logits/chosen": -1.7814455032348633,
      "logits/rejected": -1.8014984130859375,
      "logps/chosen": -135.05734252929688,
      "logps/rejected": -220.71621704101562,
      "loss": 0.1771,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.483880877494812,
      "rewards/margins": 2.265058994293213,
      "rewards/rejected": -0.7811781167984009,
      "step": 60
    },
    {
      "epoch": 1.935483870967742,
      "eval_logits/chosen": -1.7119187116622925,
      "eval_logits/rejected": -1.7429500818252563,
      "eval_logps/chosen": -106.78651428222656,
      "eval_logps/rejected": -215.82162475585938,
      "eval_loss": 0.17449094355106354,
      "eval_rewards/accuracies": 0.8571428656578064,
      "eval_rewards/chosen": 1.3137341737747192,
      "eval_rewards/margins": 1.8876904249191284,
      "eval_rewards/rejected": -0.5739563703536987,
      "eval_runtime": 10.1675,
      "eval_samples_per_second": 2.754,
      "eval_steps_per_second": 0.688,
      "step": 60
    },
    {
      "epoch": 2.258064516129032,
      "grad_norm": 3.0963690280914307,
      "learning_rate": 3.2758620689655175e-06,
      "logits/chosen": -1.8137136697769165,
      "logits/rejected": -1.831018090248108,
      "logps/chosen": -127.5184555053711,
      "logps/rejected": -193.3052215576172,
      "loss": 0.144,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 1.2200127840042114,
      "rewards/margins": 2.3993964195251465,
      "rewards/rejected": -1.179383635520935,
      "step": 70
    },
    {
      "epoch": 2.5806451612903225,
      "grad_norm": 4.23032808303833,
      "learning_rate": 2.988505747126437e-06,
      "logits/chosen": -1.8039875030517578,
      "logits/rejected": -1.8293946981430054,
      "logps/chosen": -109.42149353027344,
      "logps/rejected": -206.9613494873047,
      "loss": 0.1129,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.4697037935256958,
      "rewards/margins": 2.851051092147827,
      "rewards/rejected": -1.3813471794128418,
      "step": 80
    },
    {
      "epoch": 2.903225806451613,
      "grad_norm": 6.72817850112915,
      "learning_rate": 2.7011494252873567e-06,
      "logits/chosen": -1.814522385597229,
      "logits/rejected": -1.8382046222686768,
      "logps/chosen": -186.84329223632812,
      "logps/rejected": -260.76788330078125,
      "loss": 0.1314,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.1758986711502075,
      "rewards/margins": 2.6401941776275635,
      "rewards/rejected": -1.4642951488494873,
      "step": 90
    },
    {
      "epoch": 2.903225806451613,
      "eval_logits/chosen": -1.7031118869781494,
      "eval_logits/rejected": -1.7357984781265259,
      "eval_logps/chosen": -105.27674102783203,
      "eval_logps/rejected": -222.73390197753906,
      "eval_loss": 0.11280059069395065,
      "eval_rewards/accuracies": 0.8571428656578064,
      "eval_rewards/chosen": 1.4647114276885986,
      "eval_rewards/margins": 2.7298948764801025,
      "eval_rewards/rejected": -1.2651835680007935,
      "eval_runtime": 10.1661,
      "eval_samples_per_second": 2.754,
      "eval_steps_per_second": 0.689,
      "step": 90
    },
    {
      "epoch": 3.225806451612903,
      "grad_norm": 3.741824150085449,
      "learning_rate": 2.4137931034482762e-06,
      "logits/chosen": -1.9151725769042969,
      "logits/rejected": -1.929137945175171,
      "logps/chosen": -195.36695861816406,
      "logps/rejected": -271.27081298828125,
      "loss": 0.1269,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.3293390274047852,
      "rewards/margins": 3.3184096813201904,
      "rewards/rejected": -1.9890711307525635,
      "step": 100
    },
    {
      "epoch": 3.5483870967741935,
      "grad_norm": 3.6807944774627686,
      "learning_rate": 2.1264367816091954e-06,
      "logits/chosen": -1.7386070489883423,
      "logits/rejected": -1.766031265258789,
      "logps/chosen": -120.89439392089844,
      "logps/rejected": -234.75289916992188,
      "loss": 0.08,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.6623188257217407,
      "rewards/margins": 4.070118427276611,
      "rewards/rejected": -2.4078001976013184,
      "step": 110
    },
    {
      "epoch": 3.870967741935484,
      "grad_norm": 4.077445030212402,
      "learning_rate": 1.839080459770115e-06,
      "logits/chosen": -1.8381446599960327,
      "logits/rejected": -1.848647117614746,
      "logps/chosen": -162.987548828125,
      "logps/rejected": -245.94497680664062,
      "loss": 0.1016,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.698729157447815,
      "rewards/margins": 3.5379676818847656,
      "rewards/rejected": -1.8392385244369507,
      "step": 120
    },
    {
      "epoch": 3.870967741935484,
      "eval_logits/chosen": -1.6965323686599731,
      "eval_logits/rejected": -1.729313850402832,
      "eval_logps/chosen": -104.65281677246094,
      "eval_logps/rejected": -229.02908325195312,
      "eval_loss": 0.06442112475633621,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 1.527103066444397,
      "eval_rewards/margins": 3.4218084812164307,
      "eval_rewards/rejected": -1.8947057723999023,
      "eval_runtime": 10.1682,
      "eval_samples_per_second": 2.754,
      "eval_steps_per_second": 0.688,
      "step": 120
    },
    {
      "epoch": 4.193548387096774,
      "grad_norm": 2.3105103969573975,
      "learning_rate": 1.5517241379310346e-06,
      "logits/chosen": -1.8013200759887695,
      "logits/rejected": -1.817307472229004,
      "logps/chosen": -132.51260375976562,
      "logps/rejected": -271.40521240234375,
      "loss": 0.0683,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.6829872131347656,
      "rewards/margins": 4.633041858673096,
      "rewards/rejected": -2.950054407119751,
      "step": 130
    },
    {
      "epoch": 4.516129032258064,
      "grad_norm": 2.9849331378936768,
      "learning_rate": 1.2643678160919542e-06,
      "logits/chosen": -1.8443206548690796,
      "logits/rejected": -1.8669426441192627,
      "logps/chosen": -173.345947265625,
      "logps/rejected": -228.7808380126953,
      "loss": 0.0546,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.7071704864501953,
      "rewards/margins": 3.6590206623077393,
      "rewards/rejected": -1.9518499374389648,
      "step": 140
    },
    {
      "epoch": 4.838709677419355,
      "grad_norm": 1.6962618827819824,
      "learning_rate": 9.770114942528738e-07,
      "logits/chosen": -1.8463163375854492,
      "logits/rejected": -1.8704414367675781,
      "logps/chosen": -127.43244934082031,
      "logps/rejected": -244.4454803466797,
      "loss": 0.0633,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.2895654439926147,
      "rewards/margins": 4.082577705383301,
      "rewards/rejected": -2.7930119037628174,
      "step": 150
    },
    {
      "epoch": 4.838709677419355,
      "eval_logits/chosen": -1.692850947380066,
      "eval_logits/rejected": -1.7251230478286743,
      "eval_logps/chosen": -105.38704681396484,
      "eval_logps/rejected": -233.88169860839844,
      "eval_loss": 0.04773273691534996,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 1.4536798000335693,
      "eval_rewards/margins": 3.8336431980133057,
      "eval_rewards/rejected": -2.3799636363983154,
      "eval_runtime": 10.1649,
      "eval_samples_per_second": 2.755,
      "eval_steps_per_second": 0.689,
      "step": 150
    },
    {
      "epoch": 5.161290322580645,
      "grad_norm": 1.0977122783660889,
      "learning_rate": 6.896551724137931e-07,
      "logits/chosen": -1.854543924331665,
      "logits/rejected": -1.8709430694580078,
      "logps/chosen": -124.8465347290039,
      "logps/rejected": -214.95779418945312,
      "loss": 0.0374,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.6399686336517334,
      "rewards/margins": 4.3664116859436035,
      "rewards/rejected": -2.72644305229187,
      "step": 160
    },
    {
      "epoch": 5.483870967741936,
      "grad_norm": 0.5631425976753235,
      "learning_rate": 4.0229885057471266e-07,
      "logits/chosen": -1.882641077041626,
      "logits/rejected": -1.8940929174423218,
      "logps/chosen": -163.1198272705078,
      "logps/rejected": -238.10739135742188,
      "loss": 0.0451,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.1276298761367798,
      "rewards/margins": 3.9089503288269043,
      "rewards/rejected": -2.781320333480835,
      "step": 170
    },
    {
      "epoch": 5.806451612903226,
      "grad_norm": 1.9172598123550415,
      "learning_rate": 1.1494252873563219e-07,
      "logits/chosen": -1.7833986282348633,
      "logits/rejected": -1.8089481592178345,
      "logps/chosen": -177.43270874023438,
      "logps/rejected": -286.78851318359375,
      "loss": 0.0497,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 1.6389026641845703,
      "rewards/margins": 4.935039520263672,
      "rewards/rejected": -3.2961373329162598,
      "step": 180
    },
    {
      "epoch": 5.806451612903226,
      "eval_logits/chosen": -1.6861690282821655,
      "eval_logits/rejected": -1.7177609205245972,
      "eval_logps/chosen": -105.73729705810547,
      "eval_logps/rejected": -235.87796020507812,
      "eval_loss": 0.040502406656742096,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": 1.4186561107635498,
      "eval_rewards/margins": 3.9982476234436035,
      "eval_rewards/rejected": -2.579591751098633,
      "eval_runtime": 10.1675,
      "eval_samples_per_second": 2.754,
      "eval_steps_per_second": 0.688,
      "step": 180
    }
  ],
  "logging_steps": 10,
  "max_steps": 180,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 90,
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}