{ "best_metric": 0.014748962596058846, "best_model_checkpoint": "./Zephyr/27-03-24-Weni-kto-test_WeniGPT Experiment using KTO trainer with no collator-2_max_steps-786_batch_16_2024-03-27_ppid_1885/checkpoint-300", "epoch": 3.0476190476190474, "eval_steps": 50, "global_step": 400, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.15, "grad_norm": 2.2224462032318115, "kl": 0.054589949548244476, "learning_rate": 0.0001666666666666667, "logps/chosen": null, "logps/rejected": null, "loss": 704.2891, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 20 }, { "epoch": 0.3, "grad_norm": 2.9781014919281006, "kl": 0.014553820714354515, "learning_rate": 0.00019580052493438322, "logps/chosen": null, "logps/rejected": null, "loss": 371.0873, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 40 }, { "epoch": 0.38, "eval_kl": 0.014955863356590271, "eval_logps/chosen": -139.6813507080078, "eval_logps/rejected": -288.11480712890625, "eval_loss": 0.0440419502556324, "eval_rewards/chosen": 4.680752754211426, "eval_rewards/margins": 14.199661254882812, "eval_rewards/rejected": -9.518909454345703, "eval_runtime": 437.1874, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 50 }, { "epoch": 0.46, "grad_norm": 0.7002557516098022, "kl": 0.36391472816467285, "learning_rate": 0.0001905511811023622, "logps/chosen": null, "logps/rejected": null, "loss": 165.1605, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 60 }, { "epoch": 0.61, "grad_norm": 0.5644651651382446, "kl": 0.01063810009509325, "learning_rate": 0.00018556430446194227, "logps/chosen": null, "logps/rejected": null, "loss": 89.197, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 80 }, { "epoch": 0.76, "grad_norm": 0.18941974639892578, "kl": 0.0, "learning_rate": 0.0001805774278215223, "logps/chosen": null, "logps/rejected": 
null, "loss": 57.9834, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 100 }, { "epoch": 0.76, "eval_kl": 0.0, "eval_logps/chosen": -135.0946807861328, "eval_logps/rejected": -511.870361328125, "eval_loss": 0.027495555579662323, "eval_rewards/chosen": 5.139419078826904, "eval_rewards/margins": 37.033878326416016, "eval_rewards/rejected": -31.89446258544922, "eval_runtime": 437.2677, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 100 }, { "epoch": 0.91, "grad_norm": 1.2737089395523071, "kl": 0.0, "learning_rate": 0.00017532808398950132, "logps/chosen": null, "logps/rejected": null, "loss": 1101.4947, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 120 }, { "epoch": 1.07, "grad_norm": 0.10652273148298264, "kl": 0.0, "learning_rate": 0.00017007874015748033, "logps/chosen": null, "logps/rejected": null, "loss": 37.3685, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 140 }, { "epoch": 1.14, "eval_kl": 0.0, "eval_logps/chosen": -133.9324951171875, "eval_logps/rejected": -464.85992431640625, "eval_loss": 0.019584182649850845, "eval_rewards/chosen": 5.25563907623291, "eval_rewards/margins": 32.449058532714844, "eval_rewards/rejected": -27.19342041015625, "eval_runtime": 437.0658, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 150 }, { "epoch": 1.22, "grad_norm": 0.060089632868766785, "kl": 0.21830137073993683, "learning_rate": 0.00016482939632545934, "logps/chosen": null, "logps/rejected": null, "loss": 91.1641, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 160 }, { "epoch": 1.37, "grad_norm": 0.08910108357667923, "kl": 0.03056584671139717, "learning_rate": 0.00015958005249343832, "logps/chosen": null, "logps/rejected": null, "loss": 8.2078, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 180 }, { "epoch": 1.52, "grad_norm": 0.025327660143375397, "kl": 0.0, 
"learning_rate": 0.00015433070866141733, "logps/chosen": null, "logps/rejected": null, "loss": 3.6561, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 200 }, { "epoch": 1.52, "eval_kl": 0.0, "eval_logps/chosen": -132.1833038330078, "eval_logps/rejected": -419.2353820800781, "eval_loss": 0.016207611188292503, "eval_rewards/chosen": 5.430557727813721, "eval_rewards/margins": 28.061521530151367, "eval_rewards/rejected": -22.630962371826172, "eval_runtime": 437.1734, "eval_samples_per_second": 1.071, "eval_steps_per_second": 0.268, "step": 200 }, { "epoch": 1.68, "grad_norm": 0.04120240360498428, "kl": 0.04972388595342636, "learning_rate": 0.00014908136482939634, "logps/chosen": null, "logps/rejected": null, "loss": 21.787, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 220 }, { "epoch": 1.83, "grad_norm": 0.043324064463377, "kl": 0.09086960554122925, "learning_rate": 0.00014383202099737535, "logps/chosen": null, "logps/rejected": null, "loss": 59.5367, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 240 }, { "epoch": 1.9, "eval_kl": 0.0, "eval_logps/chosen": -129.1339111328125, "eval_logps/rejected": -504.5447998046875, "eval_loss": 0.01432154793292284, "eval_rewards/chosen": 5.7354960441589355, "eval_rewards/margins": 36.89740753173828, "eval_rewards/rejected": -31.161909103393555, "eval_runtime": 437.5542, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.267, "step": 250 }, { "epoch": 1.98, "grad_norm": 0.028953969478607178, "kl": 0.0, "learning_rate": 0.00013858267716535433, "logps/chosen": null, "logps/rejected": null, "loss": 5.7556, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 260 }, { "epoch": 2.13, "grad_norm": 0.022753821685910225, "kl": 0.18936340510845184, "learning_rate": 0.00013333333333333334, "logps/chosen": null, "logps/rejected": null, "loss": 4.7774, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": 
null, "step": 280 }, { "epoch": 2.29, "grad_norm": 0.038736581802368164, "kl": 0.0, "learning_rate": 0.00012808398950131235, "logps/chosen": null, "logps/rejected": null, "loss": 13.1891, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 300 }, { "epoch": 2.29, "eval_kl": 0.0, "eval_logps/chosen": -130.3461151123047, "eval_logps/rejected": -503.4655456542969, "eval_loss": 0.014748962596058846, "eval_rewards/chosen": 5.614275932312012, "eval_rewards/margins": 36.66826248168945, "eval_rewards/rejected": -31.05398178100586, "eval_runtime": 437.6283, "eval_samples_per_second": 1.069, "eval_steps_per_second": 0.267, "step": 300 }, { "epoch": 2.44, "grad_norm": 0.015456304885447025, "kl": 0.016447115689516068, "learning_rate": 0.00012283464566929136, "logps/chosen": null, "logps/rejected": null, "loss": 2.946, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 320 }, { "epoch": 2.59, "grad_norm": 0.03844680264592171, "kl": 0.0, "learning_rate": 0.00011758530183727034, "logps/chosen": null, "logps/rejected": null, "loss": 3.8532, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 340 }, { "epoch": 2.67, "eval_kl": 0.0, "eval_logps/chosen": -127.6288833618164, "eval_logps/rejected": -457.08013916015625, "eval_loss": 0.013063323684036732, "eval_rewards/chosen": 5.886001110076904, "eval_rewards/margins": 32.30144119262695, "eval_rewards/rejected": -26.415441513061523, "eval_runtime": 437.2, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 350 }, { "epoch": 2.74, "grad_norm": 0.013407759368419647, "kl": 0.0, "learning_rate": 0.00011233595800524934, "logps/chosen": null, "logps/rejected": null, "loss": 14.6044, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 360 }, { "epoch": 2.9, "grad_norm": 0.1375456601381302, "kl": 0.042511165142059326, "learning_rate": 0.00010708661417322836, "logps/chosen": null, "logps/rejected": null, "loss": 20.8824, 
"rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 380 }, { "epoch": 3.05, "grad_norm": 0.012805495411157608, "kl": 0.0, "learning_rate": 0.00010183727034120735, "logps/chosen": null, "logps/rejected": null, "loss": 3.7678, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 400 }, { "epoch": 3.05, "eval_kl": 0.0, "eval_logps/chosen": -127.17108917236328, "eval_logps/rejected": -460.4493408203125, "eval_loss": 0.016150476410984993, "eval_rewards/chosen": 5.931778430938721, "eval_rewards/margins": 32.684139251708984, "eval_rewards/rejected": -26.75235939025879, "eval_runtime": 437.1823, "eval_samples_per_second": 1.07, "eval_steps_per_second": 0.268, "step": 400 } ], "logging_steps": 20, "max_steps": 786, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 100, "total_flos": 0.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }