{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 312,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.5625e-07,
      "logits/chosen": 0.184401273727417,
      "logits/rejected": 0.45644012093544006,
      "logps/chosen": -431.40997314453125,
      "logps/rejected": -374.7772521972656,
      "loss": 0.1668,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.5625e-06,
      "logits/chosen": 0.19659145176410675,
      "logits/rejected": 0.16923505067825317,
      "logps/chosen": -399.8372802734375,
      "logps/rejected": -407.80474853515625,
      "loss": 0.2135,
      "rewards/accuracies": 0.3055555522441864,
      "rewards/chosen": -0.001171681797131896,
      "rewards/margins": -4.31987973570358e-05,
      "rewards/rejected": -0.001128483098000288,
      "step": 10
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": 0.19973915815353394,
      "logits/rejected": 0.2067337930202484,
      "logps/chosen": -425.3497619628906,
      "logps/rejected": -433.87921142578125,
      "loss": 0.2124,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.0007560320082120597,
      "rewards/margins": 8.589267963543534e-06,
      "rewards/rejected": -0.0007646213052794337,
      "step": 20
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.6875000000000004e-06,
      "logits/chosen": 0.18633976578712463,
      "logits/rejected": 0.30335497856140137,
      "logps/chosen": -459.7779235839844,
      "logps/rejected": -473.452392578125,
      "loss": 0.1993,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0007412955746985972,
      "rewards/margins": 0.00032791445846669376,
      "rewards/rejected": -0.0010692101204767823,
      "step": 30
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989935734988098e-06,
      "logits/chosen": 0.13782885670661926,
      "logits/rejected": 0.31285157799720764,
      "logps/chosen": -400.82794189453125,
      "logps/rejected": -394.58905029296875,
      "loss": 0.2101,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": -0.0014155835378915071,
      "rewards/margins": 0.0005916984518989921,
      "rewards/rejected": -0.0020072818733751774,
      "step": 40
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.949188496058089e-06,
      "logits/chosen": 0.1621820479631424,
      "logits/rejected": 0.3057764172554016,
      "logps/chosen": -386.84515380859375,
      "logps/rejected": -372.78460693359375,
      "loss": 0.1983,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": -0.003419263754040003,
      "rewards/margins": 0.0011660968884825706,
      "rewards/rejected": -0.0045853606425225735,
      "step": 50
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": 0.12111353874206543,
      "logits/rejected": 0.23442435264587402,
      "logps/chosen": -416.0478515625,
      "logps/rejected": -421.8002014160156,
      "loss": 0.2125,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.0044263326562941074,
      "rewards/margins": 0.0019439695170149207,
      "rewards/rejected": -0.006370303221046925,
      "step": 60
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.7761938666470405e-06,
      "logits/chosen": 0.18056654930114746,
      "logits/rejected": 0.24711184203624725,
      "logps/chosen": -412.12152099609375,
      "logps/rejected": -416.8878479003906,
      "loss": 0.2155,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.0073882960714399815,
      "rewards/margins": 0.0029027846176177263,
      "rewards/rejected": -0.010291079990565777,
      "step": 70
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.646121984004666e-06,
      "logits/chosen": 0.11649110168218613,
      "logits/rejected": 0.18730294704437256,
      "logps/chosen": -455.03399658203125,
      "logps/rejected": -423.7317810058594,
      "loss": 0.2059,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.01960514858365059,
      "rewards/margins": 0.0047067212872207165,
      "rewards/rejected": -0.024311870336532593,
      "step": 80
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.4890613722044526e-06,
      "logits/chosen": 0.13214334845542908,
      "logits/rejected": 0.1627044379711151,
      "logps/chosen": -420.0687561035156,
      "logps/rejected": -419.1974182128906,
      "loss": 0.1974,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.036356501281261444,
      "rewards/margins": 0.009195582941174507,
      "rewards/rejected": -0.0455520860850811,
      "step": 90
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.3069871595684795e-06,
      "logits/chosen": 0.09840121865272522,
      "logits/rejected": 0.19065344333648682,
      "logps/chosen": -573.3145751953125,
      "logps/rejected": -588.6065063476562,
      "loss": 0.1903,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.07631657272577286,
      "rewards/margins": 0.014788051135838032,
      "rewards/rejected": -0.09110463410615921,
      "step": 100
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.102189034962561e-06,
      "logits/chosen": 0.03982686251401901,
      "logits/rejected": 0.06170179322361946,
      "logps/chosen": -483.71844482421875,
      "logps/rejected": -537.0131225585938,
      "loss": 0.1974,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.09070506691932678,
      "rewards/margins": 0.014559125527739525,
      "rewards/rejected": -0.10526418685913086,
      "step": 110
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.8772424536302565e-06,
      "logits/chosen": 0.018964925780892372,
      "logits/rejected": 0.08643153309822083,
      "logps/chosen": -517.4832763671875,
      "logps/rejected": -530.7005615234375,
      "loss": 0.2021,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -0.1091010719537735,
      "rewards/margins": 0.02261071279644966,
      "rewards/rejected": -0.13171178102493286,
      "step": 120
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.634976249348867e-06,
      "logits/chosen": 0.055875927209854126,
      "logits/rejected": 0.0218794047832489,
      "logps/chosen": -516.7933959960938,
      "logps/rejected": -543.1007690429688,
      "loss": 0.1878,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.1324981302022934,
      "rewards/margins": 0.03664865717291832,
      "rewards/rejected": -0.1691468060016632,
      "step": 130
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.3784370602033572e-06,
      "logits/chosen": -0.0752568170428276,
      "logits/rejected": 0.07607444375753403,
      "logps/chosen": -510.17987060546875,
      "logps/rejected": -535.81494140625,
      "loss": 0.1899,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.13363127410411835,
      "rewards/margins": 0.040403980761766434,
      "rewards/rejected": -0.1740352362394333,
      "step": 140
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.1108510153447352e-06,
      "logits/chosen": -0.08575725555419922,
      "logits/rejected": -0.02042851224541664,
      "logps/chosen": -584.4342041015625,
      "logps/rejected": -560.9251708984375,
      "loss": 0.2106,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.163564532995224,
      "rewards/margins": 0.027438053861260414,
      "rewards/rejected": -0.19100257754325867,
      "step": 150
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.835583164544139e-06,
      "logits/chosen": 0.0028429715894162655,
      "logits/rejected": 0.10017697513103485,
      "logps/chosen": -546.78515625,
      "logps/rejected": -583.113525390625,
      "loss": 0.2005,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.13426996767520905,
      "rewards/margins": 0.034793704748153687,
      "rewards/rejected": -0.16906365752220154,
      "step": 160
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.556095160739513e-06,
      "logits/chosen": -0.0652703195810318,
      "logits/rejected": 0.08785363286733627,
      "logps/chosen": -474.89013671875,
      "logps/rejected": -519.8175048828125,
      "loss": 0.1939,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": -0.11705396324396133,
      "rewards/margins": 0.03326694667339325,
      "rewards/rejected": -0.150320902466774,
      "step": 170
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.2759017277414165e-06,
      "logits/chosen": -0.06097496673464775,
      "logits/rejected": -0.00452041020616889,
      "logps/chosen": -511.9466247558594,
      "logps/rejected": -552.163330078125,
      "loss": 0.1901,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.12096891552209854,
      "rewards/margins": 0.03578288108110428,
      "rewards/rejected": -0.15675179660320282,
      "step": 180
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.9985264605418185e-06,
      "logits/chosen": -0.005833833012729883,
      "logits/rejected": 0.08312544971704483,
      "logps/chosen": -529.5341796875,
      "logps/rejected": -528.4331665039062,
      "loss": 0.1788,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": -0.12104544788599014,
      "rewards/margins": 0.03279378265142441,
      "rewards/rejected": -0.15383923053741455,
      "step": 190
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": -0.053509682416915894,
      "logits/rejected": -0.0027845636941492558,
      "logps/chosen": -601.9525146484375,
      "logps/rejected": -663.5382690429688,
      "loss": 0.1869,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.1414756029844284,
      "rewards/margins": 0.056746870279312134,
      "rewards/rejected": -0.19822247326374054,
      "step": 200
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.466103737583699e-06,
      "logits/chosen": -0.005456715822219849,
      "logits/rejected": -0.13148556649684906,
      "logps/chosen": -496.91949462890625,
      "logps/rejected": -558.6058959960938,
      "loss": 0.1844,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": -0.13160409033298492,
      "rewards/margins": 0.049931664019823074,
      "rewards/rejected": -0.1815357506275177,
      "step": 210
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.217751806485235e-06,
      "logits/chosen": -0.046624261885881424,
      "logits/rejected": -0.01640903949737549,
      "logps/chosen": -458.24212646484375,
      "logps/rejected": -496.15985107421875,
      "loss": 0.195,
      "rewards/accuracies": 0.46875,
      "rewards/chosen": -0.11825968325138092,
      "rewards/margins": 0.04362802952528,
      "rewards/rejected": -0.16188772022724152,
      "step": 220
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.855248903979505e-07,
      "logits/chosen": -0.058581192046403885,
      "logits/rejected": 0.12358301877975464,
      "logps/chosen": -599.2974853515625,
      "logps/rejected": -605.1329345703125,
      "loss": 0.1807,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.14849095046520233,
      "rewards/margins": 0.043377481400966644,
      "rewards/rejected": -0.19186842441558838,
      "step": 230
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.723433775328385e-07,
      "logits/chosen": -0.04961230233311653,
      "logits/rejected": 0.06744717061519623,
      "logps/chosen": -500.963134765625,
      "logps/rejected": -533.319580078125,
      "loss": 0.1874,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.1264745444059372,
      "rewards/margins": 0.0465153343975544,
      "rewards/rejected": -0.1729898750782013,
      "step": 240
    },
    {
      "epoch": 0.8,
      "learning_rate": 5.808881491049723e-07,
      "logits/chosen": -0.012030300684273243,
      "logits/rejected": -0.08321252465248108,
      "logps/chosen": -509.8011169433594,
      "logps/rejected": -570.6807861328125,
      "loss": 0.1886,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.1262587606906891,
      "rewards/margins": 0.05674763396382332,
      "rewards/rejected": -0.1830063760280609,
      "step": 250
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1356686569674344e-07,
      "logits/chosen": -0.07445530593395233,
      "logits/rejected": 0.04640640690922737,
      "logps/chosen": -479.8208923339844,
      "logps/rejected": -571.5008544921875,
      "loss": 0.192,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.1300186812877655,
      "rewards/margins": 0.055131178349256516,
      "rewards/rejected": -0.18514983355998993,
      "step": 260
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.7248368952908055e-07,
      "logits/chosen": -0.024127716198563576,
      "logits/rejected": 0.048486463725566864,
      "logps/chosen": -492.6990661621094,
      "logps/rejected": -528.9088134765625,
      "loss": 0.1872,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.1340349167585373,
      "rewards/margins": 0.04594132676720619,
      "rewards/rejected": -0.17997623980045319,
      "step": 270
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.59412823400657e-07,
      "logits/chosen": -0.0358465239405632,
      "logits/rejected": 0.059707850217819214,
      "logps/chosen": -528.1822509765625,
      "logps/rejected": -566.1701049804688,
      "loss": 0.1907,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.1274920403957367,
      "rewards/margins": 0.05404983088374138,
      "rewards/rejected": -0.18154187500476837,
      "step": 280
    },
    {
      "epoch": 0.93,
      "learning_rate": 7.577619905828281e-08,
      "logits/chosen": -0.04293031617999077,
      "logits/rejected": -0.02192228101193905,
      "logps/chosen": -562.0809936523438,
      "logps/rejected": -616.6372680664062,
      "loss": 0.1825,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.13138429820537567,
      "rewards/margins": 0.05358911678195,
      "rewards/rejected": -0.18497340381145477,
      "step": 290
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.262559558016325e-08,
      "logits/chosen": 0.016219165176153183,
      "logits/rejected": -0.03756276145577431,
      "logps/chosen": -530.8041381835938,
      "logps/rejected": -577.8040161132812,
      "loss": 0.1984,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.12510040402412415,
      "rewards/margins": 0.048407431691884995,
      "rewards/rejected": -0.17350783944129944,
      "step": 300
    },
    {
      "epoch": 0.99,
      "learning_rate": 6.294126437336734e-10,
      "logits/chosen": -0.057995982468128204,
      "logits/rejected": -0.08621449768543243,
      "logps/chosen": -543.5442504882812,
      "logps/rejected": -563.89599609375,
      "loss": 0.1927,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.13217976689338684,
      "rewards/margins": 0.04179350286722183,
      "rewards/rejected": -0.17397327721118927,
      "step": 310
    },
    {
      "epoch": 1.0,
      "step": 312,
      "total_flos": 0.0,
      "train_loss": 0.19580436278230104,
      "train_runtime": 2618.6186,
      "train_samples_per_second": 3.819,
      "train_steps_per_second": 0.119
    }
  ],
  "logging_steps": 10,
  "max_steps": 312,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}