{ "best_metric": 0.014748962596058846, "best_model_checkpoint": "./Zephyr/27-03-24-Weni-kto-test_WeniGPT Experiment using KTO trainer with no collator-2_max_steps-786_batch_16_2024-03-27_ppid_1885/checkpoint-300", "epoch": 5.333333333333333, "eval_steps": 50, "global_step": 700, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.15, "grad_norm": 2.2224462032318115, "kl": 0.054589949548244476, "learning_rate": 0.0001666666666666667, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 704.2891, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 20 }, { "epoch": 0.3, "grad_norm": 2.9781014919281006, "kl": 0.014553820714354515, "learning_rate": 0.00019580052493438322, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 371.0873, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 40 }, { "epoch": 0.38, "eval_kl": 0.014955863356590271, "eval_logps/chosen": -139.6813507080078, "eval_logps/rejected": -288.11480712890625, "eval_loss": 0.0440419502556324, "eval_rewards/chosen": 4.680752754211426, "eval_rewards/margins": 14.199661254882812, "eval_rewards/rejected": -9.518909454345703, "eval_runtime": 437.1874, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 50 }, { "epoch": 0.46, "grad_norm": 0.7002557516098022, "kl": 0.36391472816467285, "learning_rate": 0.0001905511811023622, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 165.1605, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 60 }, { "epoch": 0.61, "grad_norm": 0.5644651651382446, "kl": 0.01063810009509325, "learning_rate": 0.00018556430446194227, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 89.197, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 80 }, { "epoch": 0.76, "grad_norm": 0.18941974639892578, "kl": 0.0, "learning_rate": 0.0001805774278215223, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 57.9834, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 100 }, { "epoch": 0.76, "eval_kl": 0.0, "eval_logps/chosen": -135.0946807861328, "eval_logps/rejected": -511.870361328125, "eval_loss": 0.027495555579662323, "eval_rewards/chosen": 5.139419078826904, "eval_rewards/margins": 37.033878326416016, "eval_rewards/rejected": -31.89446258544922, "eval_runtime": 437.2677, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 100 }, { "epoch": 0.91, "grad_norm": 1.2737089395523071, "kl": 0.0, "learning_rate": 0.00017532808398950132, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 1101.4947, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 120 }, { "epoch": 1.07, "grad_norm": 0.10652273148298264, "kl": 0.0, "learning_rate": 0.00017007874015748033, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 37.3685, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 140 }, { "epoch": 1.14, "eval_kl": 0.0, "eval_logps/chosen": -133.9324951171875, "eval_logps/rejected": -464.85992431640625, "eval_loss": 0.019584182649850845, "eval_rewards/chosen": 5.25563907623291, "eval_rewards/margins": 32.449058532714844, "eval_rewards/rejected": -27.19342041015625, "eval_runtime": 437.0658, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 150 }, { "epoch": 1.22, "grad_norm": 0.060089632868766785, "kl": 0.21830137073993683, "learning_rate": 0.00016482939632545934, "logps/chosen": NaN, "logps/rejected": NaN, 
"loss": 91.1641, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 160 }, { "epoch": 1.37, "grad_norm": 0.08910108357667923, "kl": 0.03056584671139717, "learning_rate": 0.00015958005249343832, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 8.2078, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 180 }, { "epoch": 1.52, "grad_norm": 0.025327660143375397, "kl": 0.0, "learning_rate": 0.00015433070866141733, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 3.6561, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 200 }, { "epoch": 1.52, "eval_kl": 0.0, "eval_logps/chosen": -132.1833038330078, "eval_logps/rejected": -419.2353820800781, "eval_loss": 0.016207611188292503, "eval_rewards/chosen": 5.430557727813721, "eval_rewards/margins": 28.061521530151367, "eval_rewards/rejected": -22.630962371826172, "eval_runtime": 437.1734, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 200 }, { "epoch": 1.68, "grad_norm": 0.04120240360498428, "kl": 0.04972388595342636, "learning_rate": 0.00014908136482939634, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 21.787, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 220 }, { "epoch": 1.83, "grad_norm": 0.043324064463377, "kl": 0.09086960554122925, "learning_rate": 0.00014383202099737535, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 59.5367, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 240 }, { "epoch": 1.9, "eval_kl": 0.0, "eval_logps/chosen": -129.1339111328125, "eval_logps/rejected": -504.5447998046875, "eval_loss": 0.01432154793292284, "eval_rewards/chosen": 5.7354960441589355, "eval_rewards/margins": 36.89740753173828, "eval_rewards/rejected": -31.161909103393555, "eval_runtime": 437.5542, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.267, "step": 250 }, { "epoch": 1.98, "grad_norm": 0.028953969478607178, "kl": 0.0, "learning_rate": 0.00013858267716535433, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 5.7556, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 260 }, { "epoch": 2.13, "grad_norm": 0.022753821685910225, "kl": 0.18936340510845184, "learning_rate": 0.00013333333333333334, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 4.7774, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 280 }, { "epoch": 2.29, "grad_norm": 0.038736581802368164, "kl": 0.0, "learning_rate": 0.00012808398950131235, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 13.1891, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 300 }, { "epoch": 2.29, "eval_kl": 0.0, "eval_logps/chosen": -130.3461151123047, "eval_logps/rejected": -503.4655456542969, "eval_loss": 0.014748962596058846, "eval_rewards/chosen": 5.614275932312012, "eval_rewards/margins": 36.66826248168945, "eval_rewards/rejected": -31.05398178100586, "eval_runtime": 437.6283, "eval_samples_per_second": 1.069, "eval_steps_per_second": 0.267, "step": 300 }, { "epoch": 2.44, "grad_norm": 0.015456304885447025, "kl": 0.016447115689516068, "learning_rate": 0.00012283464566929136, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 2.946, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 320 }, { "epoch": 2.59, "grad_norm": 0.03844680264592171, "kl": 0.0, "learning_rate": 0.00011758530183727034, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 3.8532, "rewards/chosen": NaN, 
"rewards/margins": NaN, "rewards/rejected": NaN, "step": 340 }, { "epoch": 2.67, "eval_kl": 0.0, "eval_logps/chosen": -127.6288833618164, "eval_logps/rejected": -457.08013916015625, "eval_loss": 0.013063323684036732, "eval_rewards/chosen": 5.886001110076904, "eval_rewards/margins": 32.30144119262695, "eval_rewards/rejected": -26.415441513061523, "eval_runtime": 437.2, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 350 }, { "epoch": 2.74, "grad_norm": 0.013407759368419647, "kl": 0.0, "learning_rate": 0.00011233595800524934, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 14.6044, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 360 }, { "epoch": 2.9, "grad_norm": 0.1375456601381302, "kl": 0.042511165142059326, "learning_rate": 0.00010708661417322836, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 20.8824, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 380 }, { "epoch": 3.05, "grad_norm": 0.012805495411157608, "kl": 0.0, "learning_rate": 0.00010183727034120735, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 3.7678, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 400 }, { "epoch": 3.05, "eval_kl": 0.0, "eval_logps/chosen": -127.17108917236328, "eval_logps/rejected": -460.4493408203125, "eval_loss": 0.016150476410984993, "eval_rewards/chosen": 5.931778430938721, "eval_rewards/margins": 32.684139251708984, "eval_rewards/rejected": -26.75235939025879, "eval_runtime": 437.1823, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 400 }, { "epoch": 3.2, "grad_norm": 0.017726995050907135, "kl": 0.2870129942893982, "learning_rate": 9.658792650918635e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 1.685, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 420 }, { "epoch": 3.35, "grad_norm": 0.007917443290352821, "kl": 0.0, "learning_rate": 9.133858267716536e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 49.3456, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 440 }, { "epoch": 3.43, "eval_kl": 0.0, "eval_logps/chosen": -127.23651123046875, "eval_logps/rejected": -479.95904541015625, "eval_loss": 0.016735197976231575, "eval_rewards/chosen": 5.925236225128174, "eval_rewards/margins": 34.628562927246094, "eval_rewards/rejected": -28.703327178955078, "eval_runtime": 437.235, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 450 }, { "epoch": 3.5, "grad_norm": 0.016776828095316887, "kl": 0.0, "learning_rate": 8.608923884514435e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 6.9542, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 460 }, { "epoch": 3.66, "grad_norm": 0.009395829401910305, "kl": 0.0, "learning_rate": 8.083989501312336e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 25.7168, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 480 }, { "epoch": 3.81, "grad_norm": 0.008246039040386677, "kl": 0.028682123869657516, "learning_rate": 7.559055118110236e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 12.2886, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 500 }, { "epoch": 3.81, "eval_kl": 0.0, "eval_logps/chosen": -126.48030090332031, "eval_logps/rejected": -487.4185485839844, "eval_loss": 0.01638978347182274, "eval_rewards/chosen": 6.000857830047607, "eval_rewards/margins": 35.45014190673828, "eval_rewards/rejected": 
-29.44928550720215, "eval_runtime": 437.6004, "eval_samples_per_second": 1.069, "eval_steps_per_second": 0.267, "step": 500 }, { "epoch": 3.96, "grad_norm": 0.010333240032196045, "kl": 0.0, "learning_rate": 7.034120734908137e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 7.7922, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 520 }, { "epoch": 4.11, "grad_norm": 0.00740875443443656, "kl": 0.0027637004386633635, "learning_rate": 6.509186351706036e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 2.3745, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 540 }, { "epoch": 4.19, "eval_kl": 0.0, "eval_logps/chosen": -126.36491394042969, "eval_logps/rejected": -492.7337951660156, "eval_loss": 0.017263902351260185, "eval_rewards/chosen": 6.012395858764648, "eval_rewards/margins": 35.993202209472656, "eval_rewards/rejected": -29.980806350708008, "eval_runtime": 437.4134, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.267, "step": 550 }, { "epoch": 4.27, "grad_norm": 0.01026746816933155, "kl": 0.2550431191921234, "learning_rate": 5.984251968503938e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 3.1176, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 560 }, { "epoch": 4.42, "grad_norm": 0.006209189537912607, "kl": 0.0, "learning_rate": 5.4593175853018376e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 3.788, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 580 }, { "epoch": 4.57, "grad_norm": 0.007328963838517666, "kl": 0.0, "learning_rate": 4.934383202099738e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 0.46, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 600 }, { "epoch": 4.57, "eval_kl": 0.0, "eval_logps/chosen": -126.42925262451172, "eval_logps/rejected": -497.53179931640625, "eval_loss": 0.01734013482928276, "eval_rewards/chosen": 6.0059614181518555, "eval_rewards/margins": 36.46657180786133, "eval_rewards/rejected": -30.460607528686523, "eval_runtime": 437.192, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 600 }, { "epoch": 4.72, "grad_norm": 0.006411154754459858, "kl": 0.0, "learning_rate": 4.409448818897638e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 5.6447, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 620 }, { "epoch": 4.88, "grad_norm": 0.006403986364603043, "kl": 0.0061653731390833855, "learning_rate": 3.8845144356955383e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 7.7723, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 640 }, { "epoch": 4.95, "eval_kl": 0.0, "eval_logps/chosen": -126.40960693359375, "eval_logps/rejected": -499.95538330078125, "eval_loss": 0.017983298748731613, "eval_rewards/chosen": 6.007925510406494, "eval_rewards/margins": 36.71089172363281, "eval_rewards/rejected": -30.702964782714844, "eval_runtime": 437.1466, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 650 }, { "epoch": 5.03, "grad_norm": 0.011981657706201077, "kl": 0.0, "learning_rate": 3.3595800524934386e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 2.9837, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 660 }, { "epoch": 5.18, "grad_norm": 0.003182880114763975, "kl": 0.24422872066497803, "learning_rate": 2.8346456692913388e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 15.7313, "rewards/chosen": NaN, 
"rewards/margins": NaN, "rewards/rejected": NaN, "step": 680 }, { "epoch": 5.33, "grad_norm": 0.004406992811709642, "kl": 0.0, "learning_rate": 2.309711286089239e-05, "logps/chosen": NaN, "logps/rejected": NaN, "loss": 4.1333, "rewards/chosen": NaN, "rewards/margins": NaN, "rewards/rejected": NaN, "step": 700 }, { "epoch": 5.33, "eval_kl": 0.0, "eval_logps/chosen": -126.45208740234375, "eval_logps/rejected": -501.8733825683594, "eval_loss": 0.018385978415608406, "eval_rewards/chosen": 6.003678321838379, "eval_rewards/margins": 36.89844512939453, "eval_rewards/rejected": -30.89476776123047, "eval_runtime": 436.8506, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 700 } ], "logging_steps": 20, "max_steps": 786, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 100, "total_flos": 0.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }