{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 43.88695656143784,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.2037925720214844,
      "logits/rejected": -1.493311882019043,
      "logps/chosen": -129.30540466308594,
      "logps/rejected": -155.7332000732422,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "grad_norm": 29.303706464347986,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.2856006622314453,
      "logits/rejected": -1.7261626720428467,
      "logps/chosen": -151.80892944335938,
      "logps/rejected": -181.124755859375,
      "loss": 0.682,
      "rewards/accuracies": 0.5833333134651184,
      "rewards/chosen": -0.04080594703555107,
      "rewards/margins": 0.03990985453128815,
      "rewards/rejected": -0.08071580529212952,
      "step": 10
    },
    {
      "epoch": 0.13,
      "grad_norm": 30.19368756466062,
      "learning_rate": 4.989935734988097e-07,
      "logits/chosen": -2.2370126247406006,
      "logits/rejected": -1.4457467794418335,
      "logps/chosen": -218.135009765625,
      "logps/rejected": -258.29510498046875,
      "loss": 0.6503,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": -0.7151989340782166,
      "rewards/margins": 0.19002024829387665,
      "rewards/rejected": -0.9052190780639648,
      "step": 20
    },
    {
      "epoch": 0.19,
      "grad_norm": 30.744279703525947,
      "learning_rate": 4.877641290737883e-07,
      "logits/chosen": -2.1132819652557373,
      "logits/rejected": -1.3941830396652222,
      "logps/chosen": -199.1055145263672,
      "logps/rejected": -239.34249877929688,
      "loss": 0.6229,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.5117620229721069,
      "rewards/margins": 0.264423668384552,
      "rewards/rejected": -0.7761856913566589,
      "step": 30
    },
    {
      "epoch": 0.26,
      "grad_norm": 26.98732429420661,
      "learning_rate": 4.646121984004665e-07,
      "logits/chosen": -2.2066855430603027,
      "logits/rejected": -1.2292633056640625,
      "logps/chosen": -188.8787841796875,
      "logps/rejected": -265.1071472167969,
      "loss": 0.5738,
      "rewards/accuracies": 0.753125011920929,
      "rewards/chosen": -0.45722055435180664,
      "rewards/margins": 0.5506829619407654,
      "rewards/rejected": -1.0079034566879272,
      "step": 40
    },
    {
      "epoch": 0.32,
      "grad_norm": 24.079733843730505,
      "learning_rate": 4.3069871595684787e-07,
      "logits/chosen": -1.9566110372543335,
      "logits/rejected": -1.0668319463729858,
      "logps/chosen": -214.89682006835938,
      "logps/rejected": -296.9588623046875,
      "loss": 0.5684,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.6292861104011536,
      "rewards/margins": 0.594062089920044,
      "rewards/rejected": -1.2233481407165527,
      "step": 50
    },
    {
      "epoch": 0.38,
      "grad_norm": 33.30951831902919,
      "learning_rate": 3.877242453630256e-07,
      "logits/chosen": -2.0148699283599854,
      "logits/rejected": -1.5277093648910522,
      "logps/chosen": -219.0657958984375,
      "logps/rejected": -274.61724853515625,
      "loss": 0.5746,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.6577062010765076,
      "rewards/margins": 0.4176337718963623,
      "rewards/rejected": -1.075339913368225,
      "step": 60
    },
    {
      "epoch": 0.45,
      "grad_norm": 33.78144466638135,
      "learning_rate": 3.378437060203357e-07,
      "logits/chosen": -1.801613450050354,
      "logits/rejected": -1.3443340063095093,
      "logps/chosen": -202.5223388671875,
      "logps/rejected": -285.18939208984375,
      "loss": 0.5695,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.6388754844665527,
      "rewards/margins": 0.5537781715393066,
      "rewards/rejected": -1.1926535367965698,
      "step": 70
    },
    {
      "epoch": 0.51,
      "grad_norm": 27.821012779285784,
      "learning_rate": 2.8355831645441387e-07,
      "logits/chosen": -1.8170337677001953,
      "logits/rejected": -1.3328382968902588,
      "logps/chosen": -208.6138153076172,
      "logps/rejected": -285.3661804199219,
      "loss": 0.5502,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.6443996429443359,
      "rewards/margins": 0.5184130072593689,
      "rewards/rejected": -1.1628127098083496,
      "step": 80
    },
    {
      "epoch": 0.58,
      "grad_norm": 32.68944554362572,
      "learning_rate": 2.2759017277414164e-07,
      "logits/chosen": -1.6956002712249756,
      "logits/rejected": -0.9904757738113403,
      "logps/chosen": -215.2491912841797,
      "logps/rejected": -314.622314453125,
      "loss": 0.549,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": -0.688922107219696,
      "rewards/margins": 0.7578561305999756,
      "rewards/rejected": -1.4467782974243164,
      "step": 90
    },
    {
      "epoch": 0.64,
      "grad_norm": 24.987088937796276,
      "learning_rate": 1.7274575140626315e-07,
      "logits/chosen": -1.6817023754119873,
      "logits/rejected": -0.9368717074394226,
      "logps/chosen": -212.26806640625,
      "logps/rejected": -296.8437805175781,
      "loss": 0.5617,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.7037585973739624,
      "rewards/margins": 0.5949375629425049,
      "rewards/rejected": -1.2986961603164673,
      "step": 100
    },
    {
      "epoch": 0.7,
      "grad_norm": 120.0167955365787,
      "learning_rate": 1.2177518064852348e-07,
      "logits/chosen": -1.8496421575546265,
      "logits/rejected": -0.7083995342254639,
      "logps/chosen": -204.84237670898438,
      "logps/rejected": -285.4545593261719,
      "loss": 0.5655,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.6622025370597839,
      "rewards/margins": 0.6420421600341797,
      "rewards/rejected": -1.3042447566986084,
      "step": 110
    },
    {
      "epoch": 0.77,
      "grad_norm": 29.13926345567332,
      "learning_rate": 7.723433775328384e-08,
      "logits/chosen": -1.6169201135635376,
      "logits/rejected": -0.7033487558364868,
      "logps/chosen": -219.37203979492188,
      "logps/rejected": -313.266845703125,
      "loss": 0.5701,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.7050714492797852,
      "rewards/margins": 0.6713167428970337,
      "rewards/rejected": -1.3763883113861084,
      "step": 120
    },
    {
      "epoch": 0.83,
      "grad_norm": 24.14450890897982,
      "learning_rate": 4.1356686569674335e-08,
      "logits/chosen": -1.6877450942993164,
      "logits/rejected": -0.4587131440639496,
      "logps/chosen": -211.0887451171875,
      "logps/rejected": -298.6360168457031,
      "loss": 0.5568,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.6507588028907776,
      "rewards/margins": 0.6841335296630859,
      "rewards/rejected": -1.3348922729492188,
      "step": 130
    },
    {
      "epoch": 0.9,
      "grad_norm": 26.54072268887061,
      "learning_rate": 1.5941282340065697e-08,
      "logits/chosen": -1.6877959966659546,
      "logits/rejected": -1.1048305034637451,
      "logps/chosen": -205.51443481445312,
      "logps/rejected": -284.08270263671875,
      "loss": 0.5438,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.6199182271957397,
      "rewards/margins": 0.6091288328170776,
      "rewards/rejected": -1.2290470600128174,
      "step": 140
    },
    {
      "epoch": 0.96,
      "grad_norm": 28.93586626169144,
      "learning_rate": 2.2625595580163247e-09,
      "logits/chosen": -1.6776434183120728,
      "logits/rejected": -0.7655587792396545,
      "logps/chosen": -212.2307891845703,
      "logps/rejected": -279.22686767578125,
      "loss": 0.5468,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.6327279806137085,
      "rewards/margins": 0.518112063407898,
      "rewards/rejected": -1.150840163230896,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.5795359783447706,
      "train_runtime": 30147.7887,
      "train_samples_per_second": 0.663,
      "train_steps_per_second": 0.005
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 200,
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}