{
"best_metric": 1.9277071952819824,
"best_model_checkpoint": "./Zephyr/08-03-24-Weni-WeniGPT-2.10.1-Zephyr-7B-DPO-prompt-binarized-GPTQ_DPO tests with binarized dataset GPTQ-2_max_steps-896_batch_16_2024-03-08_ppid_7990/checkpoint-100",
"epoch": 2.6666666666666665,
"eval_steps": 100,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.18,
"grad_norm": 119.0400161743164,
"learning_rate": 3.111111111111111e-05,
"logits/chosen": -2.6437883377075195,
"logits/rejected": -2.640676498413086,
"logps/chosen": -346.0354309082031,
"logps/rejected": -315.1640319824219,
"loss": 0.6706,
"rewards/accuracies": 0.30000001192092896,
"rewards/chosen": 0.2039356678724289,
"rewards/margins": 0.1839628666639328,
"rewards/rejected": 0.019972801208496094,
"step": 20
},
{
"epoch": 0.36,
"grad_norm": 55.844058990478516,
"learning_rate": 7.555555555555556e-05,
"logits/chosen": -2.7053043842315674,
"logits/rejected": -2.706561326980591,
"logps/chosen": -348.75,
"logps/rejected": -336.07830810546875,
"loss": 0.7255,
"rewards/accuracies": 0.5062500238418579,
"rewards/chosen": -0.3999950885772705,
"rewards/margins": 0.9534912109375,
"rewards/rejected": -1.3534862995147705,
"step": 40
},
{
"epoch": 0.53,
"grad_norm": 156.59803771972656,
"learning_rate": 0.00011777777777777779,
"logits/chosen": -2.644763946533203,
"logits/rejected": -2.6682467460632324,
"logps/chosen": -352.20233154296875,
"logps/rejected": -343.33941650390625,
"loss": 0.8382,
"rewards/accuracies": 0.5718749761581421,
"rewards/chosen": 2.873349189758301,
"rewards/margins": 1.9845993518829346,
"rewards/rejected": 0.8887494802474976,
"step": 60
},
{
"epoch": 0.71,
"grad_norm": 113.42001342773438,
"learning_rate": 0.00016222222222222224,
"logits/chosen": -2.652919292449951,
"logits/rejected": -2.663282871246338,
"logps/chosen": -350.60443115234375,
"logps/rejected": -333.491455078125,
"loss": 1.6409,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 4.428155899047852,
"rewards/margins": 2.1724541187286377,
"rewards/rejected": 2.255702018737793,
"step": 80
},
{
"epoch": 0.89,
"grad_norm": 77.21934509277344,
"learning_rate": 0.0001992555831265509,
"logits/chosen": -2.6474595069885254,
"logits/rejected": -2.6597702503204346,
"logps/chosen": -318.25653076171875,
"logps/rejected": -302.04241943359375,
"loss": 1.9207,
"rewards/accuracies": 0.4937500059604645,
"rewards/chosen": 14.573875427246094,
"rewards/margins": 3.523595094680786,
"rewards/rejected": 11.050280570983887,
"step": 100
},
{
"epoch": 0.89,
"eval_logits/chosen": -2.7368459701538086,
"eval_logits/rejected": -2.746166467666626,
"eval_logps/chosen": -334.51287841796875,
"eval_logps/rejected": -329.4024658203125,
"eval_loss": 1.9277071952819824,
"eval_rewards/accuracies": 0.5350000262260437,
"eval_rewards/chosen": 18.78862190246582,
"eval_rewards/margins": 5.63677978515625,
"eval_rewards/rejected": 13.151841163635254,
"eval_runtime": 94.4139,
"eval_samples_per_second": 2.118,
"eval_steps_per_second": 0.53,
"step": 100
},
{
"epoch": 1.07,
"grad_norm": 83.98187255859375,
"learning_rate": 0.00019429280397022334,
"logits/chosen": -2.639448642730713,
"logits/rejected": -2.6476964950561523,
"logps/chosen": -316.87982177734375,
"logps/rejected": -308.38568115234375,
"loss": 1.7601,
"rewards/accuracies": 0.5562499761581421,
"rewards/chosen": 27.463403701782227,
"rewards/margins": 10.681607246398926,
"rewards/rejected": 16.781795501708984,
"step": 120
},
{
"epoch": 1.24,
"grad_norm": 0.0,
"learning_rate": 0.0001893300248138958,
"logits/chosen": -2.6575229167938232,
"logits/rejected": -2.6546902656555176,
"logps/chosen": -350.1793212890625,
"logps/rejected": -348.28985595703125,
"loss": 1.8357,
"rewards/accuracies": 0.6156250238418579,
"rewards/chosen": -23.570585250854492,
"rewards/margins": 16.757661819458008,
"rewards/rejected": -40.32825469970703,
"step": 140
},
{
"epoch": 1.42,
"grad_norm": 1.0552074909210205,
"learning_rate": 0.00018436724565756824,
"logits/chosen": -2.690781831741333,
"logits/rejected": -2.691371440887451,
"logps/chosen": -403.81378173828125,
"logps/rejected": -407.43475341796875,
"loss": 1.4241,
"rewards/accuracies": 0.6343749761581421,
"rewards/chosen": -46.7193489074707,
"rewards/margins": 20.419803619384766,
"rewards/rejected": -67.13915252685547,
"step": 160
},
{
"epoch": 1.6,
"grad_norm": 88.9915771484375,
"learning_rate": 0.0001794044665012407,
"logits/chosen": -2.6750125885009766,
"logits/rejected": -2.682309627532959,
"logps/chosen": -399.62066650390625,
"logps/rejected": -409.8529357910156,
"loss": 1.9108,
"rewards/accuracies": 0.621874988079071,
"rewards/chosen": -49.57743453979492,
"rewards/margins": 20.643938064575195,
"rewards/rejected": -70.22136688232422,
"step": 180
},
{
"epoch": 1.78,
"grad_norm": 138.35623168945312,
"learning_rate": 0.00017444168734491314,
"logits/chosen": -2.7050137519836426,
"logits/rejected": -2.712310552597046,
"logps/chosen": -397.1112365722656,
"logps/rejected": -406.0185852050781,
"loss": 2.0578,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": -53.7899284362793,
"rewards/margins": 17.770038604736328,
"rewards/rejected": -71.55997467041016,
"step": 200
},
{
"epoch": 1.78,
"eval_logits/chosen": -2.6775686740875244,
"eval_logits/rejected": -2.6850674152374268,
"eval_logps/chosen": -419.0586242675781,
"eval_logps/rejected": -420.5866394042969,
"eval_loss": 2.5765089988708496,
"eval_rewards/accuracies": 0.550000011920929,
"eval_rewards/chosen": -65.75714874267578,
"eval_rewards/margins": 12.275188446044922,
"eval_rewards/rejected": -78.03234100341797,
"eval_runtime": 94.1202,
"eval_samples_per_second": 2.125,
"eval_steps_per_second": 0.531,
"step": 200
},
{
"epoch": 1.96,
"grad_norm": 30.972963333129883,
"learning_rate": 0.0001694789081885856,
"logits/chosen": -2.631683349609375,
"logits/rejected": -2.6248221397399902,
"logps/chosen": -404.0619201660156,
"logps/rejected": -399.0223083496094,
"loss": 1.8819,
"rewards/accuracies": 0.596875011920929,
"rewards/chosen": -62.639190673828125,
"rewards/margins": 19.827577590942383,
"rewards/rejected": -82.46675872802734,
"step": 220
},
{
"epoch": 2.13,
"grad_norm": 65.85337829589844,
"learning_rate": 0.00016451612903225807,
"logits/chosen": -2.711599826812744,
"logits/rejected": -2.7432861328125,
"logps/chosen": -392.7776794433594,
"logps/rejected": -422.9435119628906,
"loss": 1.381,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -58.72906494140625,
"rewards/margins": 29.807327270507812,
"rewards/rejected": -88.536376953125,
"step": 240
},
{
"epoch": 2.31,
"grad_norm": 86.84841918945312,
"learning_rate": 0.00015955334987593052,
"logits/chosen": -2.8453807830810547,
"logits/rejected": -2.849914073944092,
"logps/chosen": -400.3071594238281,
"logps/rejected": -420.60968017578125,
"loss": 0.9731,
"rewards/accuracies": 0.6875,
"rewards/chosen": -69.37321472167969,
"rewards/margins": 32.82529830932617,
"rewards/rejected": -102.19852447509766,
"step": 260
},
{
"epoch": 2.49,
"grad_norm": 0.010256004519760609,
"learning_rate": 0.000154590570719603,
"logits/chosen": -2.7864909172058105,
"logits/rejected": -2.812525749206543,
"logps/chosen": -376.9371643066406,
"logps/rejected": -395.2316589355469,
"loss": 0.8817,
"rewards/accuracies": 0.703125,
"rewards/chosen": -35.111305236816406,
"rewards/margins": 33.674293518066406,
"rewards/rejected": -68.78559875488281,
"step": 280
},
{
"epoch": 2.67,
"grad_norm": 0.26802292466163635,
"learning_rate": 0.00014962779156327545,
"logits/chosen": -2.7736704349517822,
"logits/rejected": -2.792346477508545,
"logps/chosen": -372.4452209472656,
"logps/rejected": -392.68878173828125,
"loss": 0.3389,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -25.840368270874023,
"rewards/margins": 39.69981002807617,
"rewards/rejected": -65.54017639160156,
"step": 300
},
{
"epoch": 2.67,
"eval_logits/chosen": -2.8219432830810547,
"eval_logits/rejected": -2.8389434814453125,
"eval_logps/chosen": -374.2080078125,
"eval_logps/rejected": -374.9266052246094,
"eval_loss": 3.5374395847320557,
"eval_rewards/accuracies": 0.5400000214576721,
"eval_rewards/chosen": -20.90648651123047,
"eval_rewards/margins": 11.465815544128418,
"eval_rewards/rejected": -32.37229919433594,
"eval_runtime": 94.0956,
"eval_samples_per_second": 2.125,
"eval_steps_per_second": 0.531,
"step": 300
}
],
"logging_steps": 20,
"max_steps": 896,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}