{ "best_metric": 0.0067643518559634686, "best_model_checkpoint": "./Zephyr/28-03-24-Weni-WeniGPT-QA-Zephyr-7B-5.0.0-KTO_WeniGPT Experiment using KTO trainer with no collator, Mixstral model and random system prompt.-2_max_steps-786_batch_32_2024-03-28_ppid_9/checkpoint-600", "epoch": 4.562737642585551, "eval_steps": 50, "global_step": 600, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.15, "grad_norm": 3.2390341758728027, "kl": 1.4365953207015991, "learning_rate": 0.0001666666666666667, "logps/chosen": null, "logps/rejected": null, "loss": 1.2323, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 20 }, { "epoch": 0.3, "grad_norm": 0.6226775050163269, "kl": 0.1346512734889984, "learning_rate": 0.00019580052493438322, "logps/chosen": null, "logps/rejected": null, "loss": 0.1437, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 40 }, { "epoch": 0.38, "eval_kl": 0.0960722267627716, "eval_logps/chosen": -158.278564453125, "eval_logps/rejected": -400.1436767578125, "eval_loss": 0.028233768418431282, "eval_rewards/chosen": 5.2842488288879395, "eval_rewards/margins": 25.394336700439453, "eval_rewards/rejected": -20.11008644104004, "eval_runtime": 214.1148, "eval_samples_per_second": 2.232, "eval_steps_per_second": 0.56, "step": 50 }, { "epoch": 0.46, "grad_norm": 1.9644193649291992, "kl": 0.026479745283722878, "learning_rate": 0.0001905511811023622, "logps/chosen": null, "logps/rejected": null, "loss": 0.1405, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 60 }, { "epoch": 0.61, "grad_norm": 0.28626948595046997, "kl": 0.018679404631257057, "learning_rate": 0.00018530183727034121, "logps/chosen": null, "logps/rejected": null, "loss": 0.1041, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 80 }, { "epoch": 0.76, "grad_norm": 0.0979766920208931, "kl": 0.0, "learning_rate": 
0.00018005249343832022, "logps/chosen": null, "logps/rejected": null, "loss": 0.0615, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 100 }, { "epoch": 0.76, "eval_kl": 0.5886130928993225, "eval_logps/chosen": -153.61863708496094, "eval_logps/rejected": -383.54296875, "eval_loss": 0.022206587716937065, "eval_rewards/chosen": 5.750241756439209, "eval_rewards/margins": 24.20025062561035, "eval_rewards/rejected": -18.450010299682617, "eval_runtime": 213.9917, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 100 }, { "epoch": 0.91, "grad_norm": 0.3924414813518524, "kl": 0.0, "learning_rate": 0.00017480314960629923, "logps/chosen": null, "logps/rejected": null, "loss": 0.0396, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 120 }, { "epoch": 1.06, "grad_norm": 0.0917697548866272, "kl": 0.2697001099586487, "learning_rate": 0.00016955380577427821, "logps/chosen": null, "logps/rejected": null, "loss": 0.0346, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 140 }, { "epoch": 1.14, "eval_kl": 0.3035987615585327, "eval_logps/chosen": -162.28248596191406, "eval_logps/rejected": -612.7335205078125, "eval_loss": 0.03983871266245842, "eval_rewards/chosen": 4.883855819702148, "eval_rewards/margins": 46.25292205810547, "eval_rewards/rejected": -41.36906814575195, "eval_runtime": 214.0091, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 150 }, { "epoch": 1.22, "grad_norm": 0.16112379729747772, "kl": 0.2598298192024231, "learning_rate": 0.00016430446194225722, "logps/chosen": null, "logps/rejected": null, "loss": 0.0437, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 160 }, { "epoch": 1.37, "grad_norm": 0.04856234788894653, "kl": 0.09709737449884415, "learning_rate": 0.00015958005249343832, "logps/chosen": null, "logps/rejected": null, "loss": 0.0288, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": 
null, "step": 180 }, { "epoch": 1.52, "grad_norm": 0.5220792889595032, "kl": 0.055810533463954926, "learning_rate": 0.00015433070866141733, "logps/chosen": null, "logps/rejected": null, "loss": 0.0563, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 200 }, { "epoch": 1.52, "eval_kl": 0.15839433670043945, "eval_logps/chosen": -149.37533569335938, "eval_logps/rejected": -463.8907165527344, "eval_loss": 0.021165071055293083, "eval_rewards/chosen": 6.174570083618164, "eval_rewards/margins": 32.65935134887695, "eval_rewards/rejected": -26.48478126525879, "eval_runtime": 214.0155, "eval_samples_per_second": 2.233, "eval_steps_per_second": 0.561, "step": 200 }, { "epoch": 1.67, "grad_norm": 0.05050266534090042, "kl": 0.0, "learning_rate": 0.00014908136482939634, "logps/chosen": null, "logps/rejected": null, "loss": 0.0298, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 220 }, { "epoch": 1.83, "grad_norm": 1.530806064605713, "kl": 0.0, "learning_rate": 0.00014383202099737535, "logps/chosen": null, "logps/rejected": null, "loss": 0.0533, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 240 }, { "epoch": 1.9, "eval_kl": 0.4594874680042267, "eval_logps/chosen": -149.20762634277344, "eval_logps/rejected": -489.6085205078125, "eval_loss": 0.013410959392786026, "eval_rewards/chosen": 6.191342353820801, "eval_rewards/margins": 35.24790573120117, "eval_rewards/rejected": -29.056562423706055, "eval_runtime": 214.0202, "eval_samples_per_second": 2.233, "eval_steps_per_second": 0.561, "step": 250 }, { "epoch": 1.98, "grad_norm": 0.017549920827150345, "kl": 0.0, "learning_rate": 0.00013858267716535433, "logps/chosen": null, "logps/rejected": null, "loss": 0.0461, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 260 }, { "epoch": 2.13, "grad_norm": 0.021922318264842033, "kl": 0.4756055772304535, "learning_rate": 0.00013333333333333334, "logps/chosen": null, 
"logps/rejected": null, "loss": 0.013, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 280 }, { "epoch": 2.28, "grad_norm": 0.02235027775168419, "kl": 0.16024529933929443, "learning_rate": 0.00012808398950131235, "logps/chosen": null, "logps/rejected": null, "loss": 0.0076, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 300 }, { "epoch": 2.28, "eval_kl": 0.6192654967308044, "eval_logps/chosen": -147.9675750732422, "eval_logps/rejected": -453.9040222167969, "eval_loss": 0.016069181263446808, "eval_rewards/chosen": 6.315345764160156, "eval_rewards/margins": 31.80146026611328, "eval_rewards/rejected": -25.486116409301758, "eval_runtime": 214.0005, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 300 }, { "epoch": 2.43, "grad_norm": 0.6879442930221558, "kl": 0.0, "learning_rate": 0.00012283464566929136, "logps/chosen": null, "logps/rejected": null, "loss": 0.0068, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 320 }, { "epoch": 2.59, "grad_norm": 0.011550226248800755, "kl": 0.0, "learning_rate": 0.00011758530183727034, "logps/chosen": null, "logps/rejected": null, "loss": 0.011, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 340 }, { "epoch": 2.66, "eval_kl": 0.4912835657596588, "eval_logps/chosen": -147.81866455078125, "eval_logps/rejected": -575.878662109375, "eval_loss": 0.011971179395914078, "eval_rewards/chosen": 6.3302388191223145, "eval_rewards/margins": 44.013816833496094, "eval_rewards/rejected": -37.68357849121094, "eval_runtime": 214.4054, "eval_samples_per_second": 2.229, "eval_steps_per_second": 0.56, "step": 350 }, { "epoch": 2.74, "grad_norm": 0.019854838028550148, "kl": 0.05510709434747696, "learning_rate": 0.00011233595800524934, "logps/chosen": null, "logps/rejected": null, "loss": 0.0422, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 360 }, { "epoch": 2.89, "grad_norm": 
0.03696136921644211, "kl": 0.0, "learning_rate": 0.00010708661417322836, "logps/chosen": null, "logps/rejected": null, "loss": 0.0192, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 380 }, { "epoch": 3.04, "grad_norm": 0.016547098755836487, "kl": 0.17841024696826935, "learning_rate": 0.00010183727034120735, "logps/chosen": null, "logps/rejected": null, "loss": 0.0049, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 400 }, { "epoch": 3.04, "eval_kl": 0.46487128734588623, "eval_logps/chosen": -147.84844970703125, "eval_logps/rejected": -498.3661804199219, "eval_loss": 0.010233537293970585, "eval_rewards/chosen": 6.327260494232178, "eval_rewards/margins": 36.25959014892578, "eval_rewards/rejected": -29.93233299255371, "eval_runtime": 214.0009, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 400 }, { "epoch": 3.19, "grad_norm": 0.029680678620934486, "kl": 0.40129226446151733, "learning_rate": 9.658792650918635e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0096, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 420 }, { "epoch": 3.35, "grad_norm": 0.01727895997464657, "kl": 0.006236672401428223, "learning_rate": 9.133858267716536e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0028, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 440 }, { "epoch": 3.42, "eval_kl": 0.29486456513404846, "eval_logps/chosen": -145.90557861328125, "eval_logps/rejected": -540.0706787109375, "eval_loss": 0.008336723782122135, "eval_rewards/chosen": 6.521546840667725, "eval_rewards/margins": 40.62433624267578, "eval_rewards/rejected": -34.10279083251953, "eval_runtime": 214.0142, "eval_samples_per_second": 2.233, "eval_steps_per_second": 0.561, "step": 450 }, { "epoch": 3.5, "grad_norm": 0.7774410843849182, "kl": 0.0, "learning_rate": 8.608923884514435e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0052, 
"rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 460 }, { "epoch": 3.65, "grad_norm": 0.009982769377529621, "kl": 0.0, "learning_rate": 8.083989501312336e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0075, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 480 }, { "epoch": 3.8, "grad_norm": 0.04710818827152252, "kl": 0.0, "learning_rate": 7.559055118110236e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0087, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 500 }, { "epoch": 3.8, "eval_kl": 0.09233824163675308, "eval_logps/chosen": -147.00436401367188, "eval_logps/rejected": -551.1768798828125, "eval_loss": 0.009622834622859955, "eval_rewards/chosen": 6.411670207977295, "eval_rewards/margins": 41.62507247924805, "eval_rewards/rejected": -35.213401794433594, "eval_runtime": 213.9724, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 500 }, { "epoch": 3.95, "grad_norm": 0.015628578141331673, "kl": 0.0, "learning_rate": 7.034120734908137e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.004, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 520 }, { "epoch": 4.11, "grad_norm": 0.006123641971498728, "kl": 0.14367903769016266, "learning_rate": 6.509186351706036e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.004, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 540 }, { "epoch": 4.18, "eval_kl": 0.15743787586688995, "eval_logps/chosen": -145.41305541992188, "eval_logps/rejected": -575.3412475585938, "eval_loss": 0.007457592524588108, "eval_rewards/chosen": 6.57080078125, "eval_rewards/margins": 44.20064163208008, "eval_rewards/rejected": -37.62984085083008, "eval_runtime": 214.0334, "eval_samples_per_second": 2.233, "eval_steps_per_second": 0.561, "step": 550 }, { "epoch": 4.26, "grad_norm": 0.0043973783031105995, "kl": 0.09317927062511444, 
"learning_rate": 5.984251968503938e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0027, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 560 }, { "epoch": 4.41, "grad_norm": 0.009126279503107071, "kl": 0.0, "learning_rate": 5.4593175853018376e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0031, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 580 }, { "epoch": 4.56, "grad_norm": 0.00867411121726036, "kl": 0.0, "learning_rate": 4.934383202099738e-05, "logps/chosen": null, "logps/rejected": null, "loss": 0.0036, "rewards/chosen": null, "rewards/margins": null, "rewards/rejected": null, "step": 600 }, { "epoch": 4.56, "eval_kl": 0.16292737424373627, "eval_logps/chosen": -144.68930053710938, "eval_logps/rejected": -565.9076538085938, "eval_loss": 0.0067643518559634686, "eval_rewards/chosen": 6.643174648284912, "eval_rewards/margins": 43.329654693603516, "eval_rewards/rejected": -36.68648147583008, "eval_runtime": 213.9275, "eval_samples_per_second": 2.234, "eval_steps_per_second": 0.561, "step": 600 } ], "logging_steps": 20, "max_steps": 786, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 100, "total_flos": 0.0, "train_batch_size": 4, "trial_name": null, "trial_params": null }