{
  "best_metric": 0.3756321966648102,
  "best_model_checkpoint": "./mistral/20-04-24-Weni-WeniGPT-Agents-Mistral-1.0.6-SFT-1.0.7-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-1470_batch_1_2024-04-20_ppid_9/checkpoint-180",
  "epoch": 1.836734693877551,
  "eval_steps": 30,
  "global_step": 450,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 83.44550323486328,
      "learning_rate": 6.666666666666667e-07,
      "logits/chosen": -1.7825796604156494,
      "logits/rejected": -1.7886196374893188,
      "logps/chosen": -169.71481323242188,
      "logps/rejected": -167.6764373779297,
      "loss": 0.6937,
      "rewards/accuracies": 0.20000000298023224,
      "rewards/chosen": -0.0002815247280523181,
      "rewards/margins": -0.0010146332206204534,
      "rewards/rejected": 0.0007331084925681353,
      "step": 10
    },
    {
      "epoch": 0.08,
      "grad_norm": 87.56982421875,
      "learning_rate": 1.777777777777778e-06,
      "logits/chosen": -1.623265027999878,
      "logits/rejected": -1.6968040466308594,
      "logps/chosen": -128.7312469482422,
      "logps/rejected": -184.20187377929688,
      "loss": 0.6912,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0030528828501701355,
      "rewards/margins": 0.004004458896815777,
      "rewards/rejected": -0.0009515761630609632,
      "step": 20
    },
    {
      "epoch": 0.12,
      "grad_norm": 113.4741439819336,
      "learning_rate": 2.888888888888889e-06,
      "logits/chosen": -1.7152118682861328,
      "logits/rejected": -1.758681058883667,
      "logps/chosen": -146.91494750976562,
      "logps/rejected": -213.5966339111328,
      "loss": 0.6781,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.03251449763774872,
      "rewards/margins": 0.030697176232933998,
      "rewards/rejected": 0.0018173219868913293,
      "step": 30
    },
    {
      "epoch": 0.12,
      "eval_logits/chosen": -1.795080304145813,
      "eval_logits/rejected": -1.8307874202728271,
      "eval_logps/chosen": -194.3749542236328,
      "eval_logps/rejected": -243.1332244873047,
      "eval_loss": 0.6762310266494751,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 0.05039369314908981,
      "eval_rewards/margins": 0.03467999026179314,
      "eval_rewards/rejected": 0.01571369729936123,
      "eval_runtime": 26.2365,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 30
    },
    {
      "epoch": 0.16,
      "grad_norm": 50.695308685302734,
      "learning_rate": 4.000000000000001e-06,
      "logits/chosen": -1.9357620477676392,
      "logits/rejected": -1.9375683069229126,
      "logps/chosen": -183.8218231201172,
      "logps/rejected": -115.80632019042969,
      "loss": 0.6768,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.056698836386203766,
      "rewards/margins": 0.03413978964090347,
      "rewards/rejected": 0.02255905233323574,
      "step": 40
    },
    {
      "epoch": 0.2,
      "grad_norm": 71.6834716796875,
      "learning_rate": 4.996491228070176e-06,
      "logits/chosen": -1.7773185968399048,
      "logits/rejected": -1.8331743478775024,
      "logps/chosen": -167.78533935546875,
      "logps/rejected": -227.0582733154297,
      "loss": 0.6205,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.1449195146560669,
      "rewards/margins": 0.15578031539916992,
      "rewards/rejected": -0.010860783979296684,
      "step": 50
    },
    {
      "epoch": 0.24,
      "grad_norm": 93.83502197265625,
      "learning_rate": 4.96140350877193e-06,
      "logits/chosen": -1.7739536762237549,
      "logits/rejected": -1.8414790630340576,
      "logps/chosen": -153.57481384277344,
      "logps/rejected": -194.3485870361328,
      "loss": 0.5918,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.2464807778596878,
      "rewards/margins": 0.22958576679229736,
      "rewards/rejected": 0.016895027831196785,
      "step": 60
    },
    {
      "epoch": 0.24,
      "eval_logits/chosen": -1.7974774837493896,
      "eval_logits/rejected": -1.8332849740982056,
      "eval_logps/chosen": -193.71743774414062,
      "eval_logps/rejected": -243.05783081054688,
      "eval_loss": 0.5998325943946838,
      "eval_rewards/accuracies": 0.7857142686843872,
      "eval_rewards/chosen": 0.24764502048492432,
      "eval_rewards/margins": 0.20930658280849457,
      "eval_rewards/rejected": 0.03833846375346184,
      "eval_runtime": 26.2349,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 60
    },
    {
      "epoch": 0.29,
      "grad_norm": 109.71602630615234,
      "learning_rate": 4.926315789473685e-06,
      "logits/chosen": -1.8950660228729248,
      "logits/rejected": -1.9148075580596924,
      "logps/chosen": -169.89028930664062,
      "logps/rejected": -177.87301635742188,
      "loss": 0.5992,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.15271134674549103,
      "rewards/margins": 0.21031668782234192,
      "rewards/rejected": -0.0576053187251091,
      "step": 70
    },
    {
      "epoch": 0.33,
      "grad_norm": 90.3881607055664,
      "learning_rate": 4.8912280701754386e-06,
      "logits/chosen": -1.798600196838379,
      "logits/rejected": -1.855521559715271,
      "logps/chosen": -160.3217315673828,
      "logps/rejected": -247.6388397216797,
      "loss": 0.5551,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.29141485691070557,
      "rewards/margins": 0.3369868993759155,
      "rewards/rejected": -0.04557197913527489,
      "step": 80
    },
    {
      "epoch": 0.37,
      "grad_norm": 147.27755737304688,
      "learning_rate": 4.856140350877193e-06,
      "logits/chosen": -1.8193047046661377,
      "logits/rejected": -1.8773219585418701,
      "logps/chosen": -155.37123107910156,
      "logps/rejected": -245.6790008544922,
      "loss": 0.4932,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.5351110696792603,
      "rewards/margins": 0.5927553176879883,
      "rewards/rejected": -0.05764435604214668,
      "step": 90
    },
    {
      "epoch": 0.37,
      "eval_logits/chosen": -1.8004387617111206,
      "eval_logits/rejected": -1.836446762084961,
      "eval_logps/chosen": -192.66908264160156,
      "eval_logps/rejected": -242.95899963378906,
      "eval_loss": 0.5072147846221924,
      "eval_rewards/accuracies": 0.8214285969734192,
      "eval_rewards/chosen": 0.5621528029441833,
      "eval_rewards/margins": 0.4941706657409668,
      "eval_rewards/rejected": 0.06798211485147476,
      "eval_runtime": 26.2422,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 90
    },
    {
      "epoch": 0.41,
      "grad_norm": 62.75928497314453,
      "learning_rate": 4.821052631578948e-06,
      "logits/chosen": -1.7835988998413086,
      "logits/rejected": -1.8885362148284912,
      "logps/chosen": -109.94624328613281,
      "logps/rejected": -232.5241241455078,
      "loss": 0.4252,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.5394761562347412,
      "rewards/margins": 0.787738025188446,
      "rewards/rejected": -0.248261958360672,
      "step": 100
    },
    {
      "epoch": 0.45,
      "grad_norm": 84.42039489746094,
      "learning_rate": 4.785964912280702e-06,
      "logits/chosen": -1.7081111669540405,
      "logits/rejected": -1.7244077920913696,
      "logps/chosen": -92.51998138427734,
      "logps/rejected": -135.5617218017578,
      "loss": 0.5077,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.6028972864151001,
      "rewards/margins": 0.511993408203125,
      "rewards/rejected": 0.09090389311313629,
      "step": 110
    },
    {
      "epoch": 0.49,
      "grad_norm": 26.61499786376953,
      "learning_rate": 4.754385964912281e-06,
      "logits/chosen": -1.9250166416168213,
      "logits/rejected": -1.9426968097686768,
      "logps/chosen": -152.0164337158203,
      "logps/rejected": -154.2359161376953,
      "loss": 0.4391,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 0.7938548922538757,
      "rewards/margins": 0.8368040919303894,
      "rewards/rejected": -0.042949117720127106,
      "step": 120
    },
    {
      "epoch": 0.49,
      "eval_logits/chosen": -1.8051280975341797,
      "eval_logits/rejected": -1.8413046598434448,
      "eval_logps/chosen": -191.2982177734375,
      "eval_logps/rejected": -242.81198120117188,
      "eval_loss": 0.4335751235485077,
      "eval_rewards/accuracies": 0.7857142686843872,
      "eval_rewards/chosen": 0.9734136462211609,
      "eval_rewards/margins": 0.8613229990005493,
      "eval_rewards/rejected": 0.11209066957235336,
      "eval_runtime": 26.2345,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 120
    },
    {
      "epoch": 0.53,
      "grad_norm": 90.6368637084961,
      "learning_rate": 4.7192982456140355e-06,
      "logits/chosen": -1.865144968032837,
      "logits/rejected": -1.9209120273590088,
      "logps/chosen": -149.78990173339844,
      "logps/rejected": -293.8749084472656,
      "loss": 0.4133,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.4554510712623596,
      "rewards/margins": 0.9285024404525757,
      "rewards/rejected": -0.47305139899253845,
      "step": 130
    },
    {
      "epoch": 0.57,
      "grad_norm": 6.8597941398620605,
      "learning_rate": 4.68421052631579e-06,
      "logits/chosen": -1.8089487552642822,
      "logits/rejected": -1.8955650329589844,
      "logps/chosen": -100.26250457763672,
      "logps/rejected": -209.1724090576172,
      "loss": 0.3414,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.9389357566833496,
      "rewards/margins": 1.521246314048767,
      "rewards/rejected": -0.5823107361793518,
      "step": 140
    },
    {
      "epoch": 0.61,
      "grad_norm": 79.9189453125,
      "learning_rate": 4.652631578947368e-06,
      "logits/chosen": -1.760371446609497,
      "logits/rejected": -1.8096182346343994,
      "logps/chosen": -193.62579345703125,
      "logps/rejected": -166.35800170898438,
      "loss": 0.3208,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.2600924968719482,
      "rewards/margins": 1.5845091342926025,
      "rewards/rejected": -0.32441645860671997,
      "step": 150
    },
    {
      "epoch": 0.61,
      "eval_logits/chosen": -1.8129692077636719,
      "eval_logits/rejected": -1.849239706993103,
      "eval_logps/chosen": -189.88929748535156,
      "eval_logps/rejected": -242.91104125976562,
      "eval_loss": 0.39326220750808716,
      "eval_rewards/accuracies": 0.7857142686843872,
      "eval_rewards/chosen": 1.3960883617401123,
      "eval_rewards/margins": 1.3137177228927612,
      "eval_rewards/rejected": 0.08237060159444809,
      "eval_runtime": 26.3372,
      "eval_samples_per_second": 1.063,
      "eval_steps_per_second": 1.063,
      "step": 150
    },
    {
      "epoch": 0.65,
      "grad_norm": 3.151426315307617,
      "learning_rate": 4.617543859649123e-06,
      "logits/chosen": -1.7185112237930298,
      "logits/rejected": -1.749427080154419,
      "logps/chosen": -178.8656463623047,
      "logps/rejected": -201.32858276367188,
      "loss": 0.4418,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 1.3884060382843018,
      "rewards/margins": 1.4542597532272339,
      "rewards/rejected": -0.06585375964641571,
      "step": 160
    },
    {
      "epoch": 0.69,
      "grad_norm": 37.79099655151367,
      "learning_rate": 4.582456140350878e-06,
      "logits/chosen": -1.730787992477417,
      "logits/rejected": -1.8099721670150757,
      "logps/chosen": -158.50277709960938,
      "logps/rejected": -185.48129272460938,
      "loss": 0.3912,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 1.4253065586090088,
      "rewards/margins": 1.2418479919433594,
      "rewards/rejected": 0.18345877528190613,
      "step": 170
    },
    {
      "epoch": 0.73,
      "grad_norm": 18.49593734741211,
      "learning_rate": 4.547368421052632e-06,
      "logits/chosen": -1.842390775680542,
      "logits/rejected": -1.8834030628204346,
      "logps/chosen": -125.84483337402344,
      "logps/rejected": -182.78854370117188,
      "loss": 0.3215,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.5408955812454224,
      "rewards/margins": 1.4677695035934448,
      "rewards/rejected": 0.07312598079442978,
      "step": 180
    },
    {
      "epoch": 0.73,
      "eval_logits/chosen": -1.8194406032562256,
      "eval_logits/rejected": -1.8561830520629883,
      "eval_logps/chosen": -188.3820037841797,
      "eval_logps/rejected": -243.13536071777344,
      "eval_loss": 0.3756321966648102,
      "eval_rewards/accuracies": 0.7857142686843872,
      "eval_rewards/chosen": 1.8482797145843506,
      "eval_rewards/margins": 1.8331985473632812,
      "eval_rewards/rejected": 0.015081183984875679,
      "eval_runtime": 26.2381,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 180
    },
    {
      "epoch": 0.78,
      "grad_norm": 106.56973266601562,
      "learning_rate": 4.512280701754386e-06,
      "logits/chosen": -1.8318649530410767,
      "logits/rejected": -1.8742272853851318,
      "logps/chosen": -197.66619873046875,
      "logps/rejected": -247.83560180664062,
      "loss": 0.3303,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 2.4889655113220215,
      "rewards/margins": 2.536695957183838,
      "rewards/rejected": -0.047730110585689545,
      "step": 190
    },
    {
      "epoch": 0.82,
      "grad_norm": 1.9826362133026123,
      "learning_rate": 4.47719298245614e-06,
      "logits/chosen": -1.7579128742218018,
      "logits/rejected": -1.7890180349349976,
      "logps/chosen": -129.37191772460938,
      "logps/rejected": -166.67636108398438,
      "loss": 0.3292,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.5646984577178955,
      "rewards/margins": 2.6955490112304688,
      "rewards/rejected": -0.13085035979747772,
      "step": 200
    },
    {
      "epoch": 0.86,
      "grad_norm": 6.777231693267822,
      "learning_rate": 4.442105263157896e-06,
      "logits/chosen": -1.7335002422332764,
      "logits/rejected": -1.8272641897201538,
      "logps/chosen": -114.22731018066406,
      "logps/rejected": -215.8419647216797,
      "loss": 0.0817,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 3.278646469116211,
      "rewards/margins": 4.009958267211914,
      "rewards/rejected": -0.7313117980957031,
      "step": 210
    },
    {
      "epoch": 0.86,
      "eval_logits/chosen": -1.826608419418335,
      "eval_logits/rejected": -1.8640671968460083,
      "eval_logps/chosen": -186.82992553710938,
      "eval_logps/rejected": -243.80209350585938,
      "eval_loss": 0.3834719657897949,
      "eval_rewards/accuracies": 0.7857142686843872,
      "eval_rewards/chosen": 2.3139071464538574,
      "eval_rewards/margins": 2.4988515377044678,
      "eval_rewards/rejected": -0.1849442571401596,
      "eval_runtime": 26.2309,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 210
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.0230931043624878,
      "learning_rate": 4.40701754385965e-06,
      "logits/chosen": -1.7561031579971313,
      "logits/rejected": -1.8601219654083252,
      "logps/chosen": -134.11428833007812,
      "logps/rejected": -273.5272216796875,
      "loss": 0.1796,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.9340410232543945,
      "rewards/margins": 3.8275578022003174,
      "rewards/rejected": -1.8935168981552124,
      "step": 220
    },
    {
      "epoch": 0.94,
      "grad_norm": 54.65635299682617,
      "learning_rate": 4.371929824561404e-06,
      "logits/chosen": -1.8512550592422485,
      "logits/rejected": -1.900559663772583,
      "logps/chosen": -210.9802703857422,
      "logps/rejected": -240.60269165039062,
      "loss": 0.5004,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 1.5826597213745117,
      "rewards/margins": 2.925461530685425,
      "rewards/rejected": -1.342801809310913,
      "step": 230
    },
    {
      "epoch": 0.98,
      "grad_norm": 18.797134399414062,
      "learning_rate": 4.336842105263158e-06,
      "logits/chosen": -1.7781873941421509,
      "logits/rejected": -1.8780953884124756,
      "logps/chosen": -136.18722534179688,
      "logps/rejected": -257.26495361328125,
      "loss": 0.137,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.9192733764648438,
      "rewards/margins": 3.8388113975524902,
      "rewards/rejected": -1.9195384979248047,
      "step": 240
    },
    {
      "epoch": 0.98,
      "eval_logits/chosen": -1.83432936668396,
      "eval_logits/rejected": -1.8722076416015625,
      "eval_logps/chosen": -185.8831329345703,
      "eval_logps/rejected": -244.8594207763672,
      "eval_loss": 0.41323208808898926,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.5979421138763428,
      "eval_rewards/margins": 3.1000888347625732,
      "eval_rewards/rejected": -0.5021467208862305,
      "eval_runtime": 26.2323,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 240
    },
    {
      "epoch": 1.02,
      "grad_norm": 132.51947021484375,
      "learning_rate": 4.301754385964912e-06,
      "logits/chosen": -1.8787914514541626,
      "logits/rejected": -1.9520515203475952,
      "logps/chosen": -186.46717834472656,
      "logps/rejected": -250.008056640625,
      "loss": 0.3531,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 2.192800283432007,
      "rewards/margins": 3.485269069671631,
      "rewards/rejected": -1.2924686670303345,
      "step": 250
    },
    {
      "epoch": 1.06,
      "grad_norm": 6.529983043670654,
      "learning_rate": 4.266666666666668e-06,
      "logits/chosen": -1.7029941082000732,
      "logits/rejected": -1.7871618270874023,
      "logps/chosen": -128.5960235595703,
      "logps/rejected": -209.27603149414062,
      "loss": 0.0883,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.7405991554260254,
      "rewards/margins": 5.148273468017578,
      "rewards/rejected": -2.4076738357543945,
      "step": 260
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.27644646167755127,
      "learning_rate": 4.2315789473684215e-06,
      "logits/chosen": -1.7499496936798096,
      "logits/rejected": -1.8194109201431274,
      "logps/chosen": -119.34013366699219,
      "logps/rejected": -230.69229125976562,
      "loss": 0.0997,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 4.146358013153076,
      "rewards/margins": 5.72900390625,
      "rewards/rejected": -1.5826464891433716,
      "step": 270
    },
    {
      "epoch": 1.1,
      "eval_logits/chosen": -1.842972755432129,
      "eval_logits/rejected": -1.8816190958023071,
      "eval_logps/chosen": -185.4148406982422,
      "eval_logps/rejected": -246.53672790527344,
      "eval_loss": 0.46570485830307007,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.7384250164031982,
      "eval_rewards/margins": 3.743762969970703,
      "eval_rewards/rejected": -1.0053375959396362,
      "eval_runtime": 26.2348,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 270
    },
    {
      "epoch": 1.14,
      "grad_norm": 31.16436195373535,
      "learning_rate": 4.196491228070176e-06,
      "logits/chosen": -1.8344509601593018,
      "logits/rejected": -1.919553518295288,
      "logps/chosen": -131.51156616210938,
      "logps/rejected": -246.3345184326172,
      "loss": 0.138,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 2.982485771179199,
      "rewards/margins": 6.261662483215332,
      "rewards/rejected": -3.2791759967803955,
      "step": 280
    },
    {
      "epoch": 1.18,
      "grad_norm": 0.08183232694864273,
      "learning_rate": 4.16140350877193e-06,
      "logits/chosen": -1.847378134727478,
      "logits/rejected": -1.8774774074554443,
      "logps/chosen": -127.52336120605469,
      "logps/rejected": -157.3633270263672,
      "loss": 0.3883,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.7564169764518738,
      "rewards/margins": 2.541962146759033,
      "rewards/rejected": -1.7855453491210938,
      "step": 290
    },
    {
      "epoch": 1.22,
      "grad_norm": 10.61989974975586,
      "learning_rate": 4.126315789473685e-06,
      "logits/chosen": -1.7770633697509766,
      "logits/rejected": -1.8337452411651611,
      "logps/chosen": -112.7281265258789,
      "logps/rejected": -195.87020874023438,
      "loss": 0.0432,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 3.176725387573242,
      "rewards/margins": 6.761993408203125,
      "rewards/rejected": -3.5852675437927246,
      "step": 300
    },
    {
      "epoch": 1.22,
      "eval_logits/chosen": -1.849483609199524,
      "eval_logits/rejected": -1.888432502746582,
      "eval_logps/chosen": -185.52931213378906,
      "eval_logps/rejected": -248.10926818847656,
      "eval_loss": 0.501145601272583,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.704090118408203,
      "eval_rewards/margins": 4.181191921234131,
      "eval_rewards/rejected": -1.477101445198059,
      "eval_runtime": 26.2437,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 300
    },
    {
      "epoch": 1.27,
      "grad_norm": 327.02899169921875,
      "learning_rate": 4.091228070175439e-06,
      "logits/chosen": -1.8802036046981812,
      "logits/rejected": -1.9435373544692993,
      "logps/chosen": -164.2046661376953,
      "logps/rejected": -269.22235107421875,
      "loss": 0.4088,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 3.3527073860168457,
      "rewards/margins": 5.9436140060424805,
      "rewards/rejected": -2.5909066200256348,
      "step": 310
    },
    {
      "epoch": 1.31,
      "grad_norm": 6.565308094024658,
      "learning_rate": 4.056140350877193e-06,
      "logits/chosen": -1.9250032901763916,
      "logits/rejected": -1.9860813617706299,
      "logps/chosen": -197.4870147705078,
      "logps/rejected": -207.25009155273438,
      "loss": 0.4969,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.32615354657173157,
      "rewards/margins": 2.2872400283813477,
      "rewards/rejected": -1.9610865116119385,
      "step": 320
    },
    {
      "epoch": 1.35,
      "grad_norm": 33.34074020385742,
      "learning_rate": 4.021052631578948e-06,
      "logits/chosen": -1.8470687866210938,
      "logits/rejected": -1.8675216436386108,
      "logps/chosen": -117.5759506225586,
      "logps/rejected": -151.83091735839844,
      "loss": 0.1819,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.878262758255005,
      "rewards/margins": 5.000906467437744,
      "rewards/rejected": -2.12264347076416,
      "step": 330
    },
    {
      "epoch": 1.35,
      "eval_logits/chosen": -1.8486573696136475,
      "eval_logits/rejected": -1.8877869844436646,
      "eval_logps/chosen": -185.54177856445312,
      "eval_logps/rejected": -249.2687530517578,
      "eval_loss": 0.478495329618454,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.7003517150878906,
      "eval_rewards/margins": 4.525294780731201,
      "eval_rewards/rejected": -1.8249431848526,
      "eval_runtime": 26.2362,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 330
    },
    {
      "epoch": 1.39,
      "grad_norm": 0.6777727603912354,
      "learning_rate": 3.985964912280702e-06,
      "logits/chosen": -1.8584426641464233,
      "logits/rejected": -1.884598970413208,
      "logps/chosen": -78.69309997558594,
      "logps/rejected": -126.11468505859375,
      "loss": 0.3154,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.5596954822540283,
      "rewards/margins": 4.089261054992676,
      "rewards/rejected": -1.5295660495758057,
      "step": 340
    },
    {
      "epoch": 1.43,
      "grad_norm": 45.252723693847656,
      "learning_rate": 3.950877192982457e-06,
      "logits/chosen": -1.8422210216522217,
      "logits/rejected": -1.8741470575332642,
      "logps/chosen": -198.17526245117188,
      "logps/rejected": -179.26759338378906,
      "loss": 0.2128,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.886838912963867,
      "rewards/margins": 4.228543758392334,
      "rewards/rejected": -1.3417048454284668,
      "step": 350
    },
    {
      "epoch": 1.47,
      "grad_norm": 0.0007244591251946986,
      "learning_rate": 3.9157894736842104e-06,
      "logits/chosen": -1.8951257467269897,
      "logits/rejected": -1.9684120416641235,
      "logps/chosen": -123.7894515991211,
      "logps/rejected": -278.1185607910156,
      "loss": 0.0169,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 3.9708969593048096,
      "rewards/margins": 9.946398735046387,
      "rewards/rejected": -5.975502014160156,
      "step": 360
    },
    {
      "epoch": 1.47,
      "eval_logits/chosen": -1.8510137796401978,
      "eval_logits/rejected": -1.8906936645507812,
      "eval_logps/chosen": -185.66189575195312,
      "eval_logps/rejected": -250.37806701660156,
      "eval_loss": 0.4872412383556366,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.664308547973633,
      "eval_rewards/margins": 4.822035789489746,
      "eval_rewards/rejected": -2.157727003097534,
      "eval_runtime": 26.2354,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 360
    },
    {
      "epoch": 1.51,
      "grad_norm": 0.001088384771719575,
      "learning_rate": 3.880701754385965e-06,
      "logits/chosen": -1.859140157699585,
      "logits/rejected": -1.903721809387207,
      "logps/chosen": -171.89047241210938,
      "logps/rejected": -257.6295471191406,
      "loss": 0.237,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 2.257797956466675,
      "rewards/margins": 7.423349857330322,
      "rewards/rejected": -5.165551662445068,
      "step": 370
    },
    {
      "epoch": 1.55,
      "grad_norm": 13.049817085266113,
      "learning_rate": 3.84561403508772e-06,
      "logits/chosen": -1.850102186203003,
      "logits/rejected": -1.922501564025879,
      "logps/chosen": -145.50692749023438,
      "logps/rejected": -221.7799530029297,
      "loss": 0.2906,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 1.9433931112289429,
      "rewards/margins": 6.384174346923828,
      "rewards/rejected": -4.440781593322754,
      "step": 380
    },
    {
      "epoch": 1.59,
      "grad_norm": 0.006105802953243256,
      "learning_rate": 3.810526315789474e-06,
      "logits/chosen": -1.9277305603027344,
      "logits/rejected": -1.9736566543579102,
      "logps/chosen": -140.36146545410156,
      "logps/rejected": -188.66836547851562,
      "loss": 0.235,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.6781504154205322,
      "rewards/margins": 5.825950622558594,
      "rewards/rejected": -3.1477997303009033,
      "step": 390
    },
    {
      "epoch": 1.59,
      "eval_logits/chosen": -1.8531960248947144,
      "eval_logits/rejected": -1.8930160999298096,
      "eval_logps/chosen": -185.68800354003906,
      "eval_logps/rejected": -251.1302032470703,
      "eval_loss": 0.48860952258110046,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.656480550765991,
      "eval_rewards/margins": 5.039853572845459,
      "eval_rewards/rejected": -2.3833730220794678,
      "eval_runtime": 26.2308,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 390
    },
    {
      "epoch": 1.63,
      "grad_norm": 0.00030716226319782436,
      "learning_rate": 3.7754385964912284e-06,
      "logits/chosen": -1.8817369937896729,
      "logits/rejected": -1.9124549627304077,
      "logps/chosen": -171.40383911132812,
      "logps/rejected": -198.42446899414062,
      "loss": 0.4322,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 1.219795823097229,
      "rewards/margins": 6.402298927307129,
      "rewards/rejected": -5.182502269744873,
      "step": 400
    },
    {
      "epoch": 1.67,
      "grad_norm": 0.0007249619229696691,
      "learning_rate": 3.7403508771929827e-06,
      "logits/chosen": -1.8018410205841064,
      "logits/rejected": -1.869511365890503,
      "logps/chosen": -168.9734649658203,
      "logps/rejected": -223.9024658203125,
      "loss": 0.9332,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 2.475252151489258,
      "rewards/margins": 5.069212913513184,
      "rewards/rejected": -2.593961238861084,
      "step": 410
    },
    {
      "epoch": 1.71,
      "grad_norm": 46.79059982299805,
      "learning_rate": 3.7052631578947374e-06,
      "logits/chosen": -1.774857759475708,
      "logits/rejected": -1.8125171661376953,
      "logps/chosen": -140.12551879882812,
      "logps/rejected": -208.7825927734375,
      "loss": 0.7551,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 2.244128704071045,
      "rewards/margins": 6.2417426109313965,
      "rewards/rejected": -3.997614622116089,
      "step": 420
    },
    {
      "epoch": 1.71,
      "eval_logits/chosen": -1.8527201414108276,
      "eval_logits/rejected": -1.892095923423767,
      "eval_logps/chosen": -185.46646118164062,
      "eval_logps/rejected": -251.00816345214844,
      "eval_loss": 0.438032329082489,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.7229368686676025,
      "eval_rewards/margins": 5.069703102111816,
      "eval_rewards/rejected": -2.346766233444214,
      "eval_runtime": 26.2319,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 420
    },
    {
      "epoch": 1.76,
      "grad_norm": 0.1679692417383194,
      "learning_rate": 3.6701754385964917e-06,
      "logits/chosen": -1.9766556024551392,
      "logits/rejected": -1.967247724533081,
      "logps/chosen": -181.25978088378906,
      "logps/rejected": -174.0313262939453,
      "loss": 0.5746,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 2.037475347518921,
      "rewards/margins": 4.661454200744629,
      "rewards/rejected": -2.623978853225708,
      "step": 430
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.028290700167417526,
      "learning_rate": 3.635087719298246e-06,
      "logits/chosen": -1.7324810028076172,
      "logits/rejected": -1.8381952047348022,
      "logps/chosen": -118.8620376586914,
      "logps/rejected": -249.5945587158203,
      "loss": 0.1919,
      "rewards/accuracies": 0.8999999761581421,
      "rewards/chosen": 2.84242582321167,
      "rewards/margins": 8.061222076416016,
      "rewards/rejected": -5.218796730041504,
      "step": 440
    },
    {
      "epoch": 1.84,
      "grad_norm": 3.435687303543091,
      "learning_rate": 3.6000000000000003e-06,
      "logits/chosen": -1.788468599319458,
      "logits/rejected": -1.8786054849624634,
      "logps/chosen": -127.3366928100586,
      "logps/rejected": -223.4063262939453,
      "loss": 0.134,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.9258162379264832,
      "rewards/margins": 6.352460861206055,
      "rewards/rejected": -5.426644802093506,
      "step": 450
    },
    {
      "epoch": 1.84,
      "eval_logits/chosen": -1.8530861139297485,
      "eval_logits/rejected": -1.8925215005874634,
      "eval_logps/chosen": -185.6543426513672,
      "eval_logps/rejected": -251.7077178955078,
      "eval_loss": 0.4383006691932678,
      "eval_rewards/accuracies": 0.75,
      "eval_rewards/chosen": 2.6665782928466797,
      "eval_rewards/margins": 5.223209857940674,
      "eval_rewards/rejected": -2.556631565093994,
      "eval_runtime": 26.2389,
      "eval_samples_per_second": 1.067,
      "eval_steps_per_second": 1.067,
      "step": 450
    }
  ],
  "logging_steps": 10,
  "max_steps": 1470,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 90,
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}