zephyr-7b-dpo-lora / all_results.json
Commit 64a18a9 by jikaixuan: Training in progress, epoch 0
{
    "epoch": 3.0,
    "eval_logits/chosen": -1.2629982233047485,
    "eval_logits/rejected": -1.075066089630127,
    "eval_logps/chosen": -588.9970092773438,
    "eval_logps/rejected": -633.47216796875,
    "eval_loss": -7.541553497314453,
    "eval_rewards/accuracies": 0.6150793433189392,
    "eval_rewards/chosen": -29.919435501098633,
    "eval_rewards/margins": 9.934508323669434,
    "eval_rewards/rejected": -39.853946685791016,
    "eval_runtime": 237.8142,
    "eval_samples": 2000,
    "eval_samples_per_second": 8.41,
    "eval_steps_per_second": 0.265,
    "train_loss": -1.932115889119454,
    "train_runtime": 45081.596,
    "train_samples": 61966,
    "train_samples_per_second": 4.124,
    "train_steps_per_second": 0.064
}
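
As a sanity check, the reported "eval_rewards/margins" (about 9.93) matches the difference between the chosen and rejected rewards: -29.92 - (-39.85) ≈ 9.93. Below is a minimal sketch that loads the file and verifies this relation; it assumes the file is saved locally as all_results.json and that the trainer logs the margin as the mean per-example chosen-minus-rejected reward, as DPO trainers such as TRL's DPOTrainer typically do.

```python
# Sketch: load the DPO evaluation metrics and check the reward-margin identity.
# Assumes a local copy of this file named "all_results.json".
import json

with open("all_results.json") as f:
    results = json.load(f)

chosen = results["eval_rewards/chosen"]      # mean reward of preferred responses
rejected = results["eval_rewards/rejected"]  # mean reward of dispreferred responses
margin = results["eval_rewards/margins"]     # mean per-example (chosen - rejected)

# Mean of per-example differences equals the difference of the means,
# so the two numbers should agree up to floating-point noise.
print(f"chosen - rejected = {chosen - rejected:.6f}, logged margin = {margin:.6f}")
assert abs((chosen - rejected) - margin) < 1e-3
```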