{ "best_metric": 0.1095743179321289, "best_model_checkpoint": "./mistral/22-04-24-Weni-WeniGPT-Agents-Mistral-1.0.0-SFT-1.0.21-DPO_Experiment on DPO with other hyperparameters and best SFT model of WeniGPT-2_max_steps-180_batch_8_2024-04-22_ppid_9/checkpoint-90", "epoch": 2.903225806451613, "eval_steps": 30, "global_step": 90, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.3225806451612903, "grad_norm": 13.277989387512207, "learning_rate": 4.971264367816092e-06, "logits/chosen": -1.7891426086425781, "logits/rejected": -1.814126968383789, "logps/chosen": -200.95481872558594, "logps/rejected": -223.9504852294922, "loss": 0.6771, "rewards/accuracies": 0.4000000059604645, "rewards/chosen": 0.02733452059328556, "rewards/margins": 0.028555069118738174, "rewards/rejected": -0.0012205507373437285, "step": 10 }, { "epoch": 0.6451612903225806, "grad_norm": 11.217987060546875, "learning_rate": 4.683908045977012e-06, "logits/chosen": -1.8347755670547485, "logits/rejected": -1.8467401266098022, "logps/chosen": -144.74403381347656, "logps/rejected": -191.45069885253906, "loss": 0.5478, "rewards/accuracies": 0.949999988079071, "rewards/chosen": 0.37081974744796753, "rewards/margins": 0.4048076272010803, "rewards/rejected": -0.03398784250020981, "step": 20 }, { "epoch": 0.967741935483871, "grad_norm": 11.224645614624023, "learning_rate": 4.396551724137931e-06, "logits/chosen": -1.785586953163147, "logits/rejected": -1.8152267932891846, "logps/chosen": -190.48626708984375, "logps/rejected": -248.2376251220703, "loss": 0.3976, "rewards/accuracies": 1.0, "rewards/chosen": 0.7104076147079468, "rewards/margins": 0.848293662071228, "rewards/rejected": -0.13788609206676483, "step": 30 }, { "epoch": 0.967741935483871, "eval_logits/chosen": -1.7412782907485962, "eval_logits/rejected": -1.7560926675796509, "eval_logps/chosen": -115.03002166748047, "eval_logps/rejected": -160.6632080078125, "eval_loss": 0.3185553252696991, "eval_rewards/accuracies": 1.0, "eval_rewards/chosen": 0.8372641801834106, "eval_rewards/margins": 0.8753501176834106, "eval_rewards/rejected": -0.038085997104644775, "eval_runtime": 10.5823, "eval_samples_per_second": 2.646, "eval_steps_per_second": 0.661, "step": 30 }, { "epoch": 1.2903225806451613, "grad_norm": 5.652975082397461, "learning_rate": 4.1091954022988515e-06, "logits/chosen": -1.7882213592529297, "logits/rejected": -1.7954260110855103, "logps/chosen": -159.75631713867188, "logps/rejected": -182.1666717529297, "loss": 0.2432, "rewards/accuracies": 0.8999999761581421, "rewards/chosen": 1.2001782655715942, "rewards/margins": 1.2467749118804932, "rewards/rejected": -0.04659665375947952, "step": 40 }, { "epoch": 1.6129032258064515, "grad_norm": 3.635425090789795, "learning_rate": 3.82183908045977e-06, "logits/chosen": -1.8176883459091187, "logits/rejected": -1.8400278091430664, "logps/chosen": -114.22891998291016, "logps/rejected": -197.11019897460938, "loss": 0.242, "rewards/accuracies": 0.949999988079071, "rewards/chosen": 1.5082905292510986, "rewards/margins": 2.3228204250335693, "rewards/rejected": -0.8145298957824707, "step": 50 }, { "epoch": 1.935483870967742, "grad_norm": 5.051187038421631, "learning_rate": 3.5344827586206898e-06, "logits/chosen": -1.769321084022522, "logits/rejected": -1.7883247137069702, "logps/chosen": -149.78382873535156, "logps/rejected": -172.86285400390625, "loss": 0.1934, "rewards/accuracies": 0.8999999761581421, "rewards/chosen": 1.4219303131103516, "rewards/margins": 
1.7929319143295288, "rewards/rejected": -0.3710017502307892, "step": 60 }, { "epoch": 1.935483870967742, "eval_logits/chosen": -1.745606541633606, "eval_logits/rejected": -1.7603371143341064, "eval_logps/chosen": -110.98043060302734, "eval_logps/rejected": -163.220703125, "eval_loss": 0.17523464560508728, "eval_rewards/accuracies": 1.0, "eval_rewards/chosen": 1.6471832990646362, "eval_rewards/margins": 2.1967694759368896, "eval_rewards/rejected": -0.5495859384536743, "eval_runtime": 10.5122, "eval_samples_per_second": 2.664, "eval_steps_per_second": 0.666, "step": 60 }, { "epoch": 2.258064516129032, "grad_norm": 5.4414238929748535, "learning_rate": 3.24712643678161e-06, "logits/chosen": -1.761417031288147, "logits/rejected": -1.7780914306640625, "logps/chosen": -153.3155059814453, "logps/rejected": -175.71119689941406, "loss": 0.1552, "rewards/accuracies": 1.0, "rewards/chosen": 1.9575698375701904, "rewards/margins": 2.617478847503662, "rewards/rejected": -0.6599088907241821, "step": 70 }, { "epoch": 2.5806451612903225, "grad_norm": 3.4745564460754395, "learning_rate": 2.988505747126437e-06, "logits/chosen": -1.8878180980682373, "logits/rejected": -1.895895004272461, "logps/chosen": -148.46951293945312, "logps/rejected": -211.92874145507812, "loss": 0.157, "rewards/accuracies": 0.949999988079071, "rewards/chosen": 1.930851697921753, "rewards/margins": 2.822469711303711, "rewards/rejected": -0.8916179537773132, "step": 80 }, { "epoch": 2.903225806451613, "grad_norm": 4.5158843994140625, "learning_rate": 2.7011494252873567e-06, "logits/chosen": -1.8586393594741821, "logits/rejected": -1.8672926425933838, "logps/chosen": -166.34080505371094, "logps/rejected": -204.51248168945312, "loss": 0.1058, "rewards/accuracies": 1.0, "rewards/chosen": 2.207078695297241, "rewards/margins": 3.182745933532715, "rewards/rejected": -0.9756671190261841, "step": 90 }, { "epoch": 2.903225806451613, "eval_logits/chosen": -1.7490978240966797, "eval_logits/rejected": -1.7636711597442627, "eval_logps/chosen": -109.27593231201172, "eval_logps/rejected": -164.40065002441406, "eval_loss": 0.1095743179321289, "eval_rewards/accuracies": 1.0, "eval_rewards/chosen": 1.9880834817886353, "eval_rewards/margins": 2.77366042137146, "eval_rewards/rejected": -0.7855769991874695, "eval_runtime": 10.5091, "eval_samples_per_second": 2.664, "eval_steps_per_second": 0.666, "step": 90 } ], "logging_steps": 10, "max_steps": 180, "num_input_tokens_seen": 0, "num_train_epochs": 6, "save_steps": 90, "total_flos": 0.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }