SELM-Llama-3-8B-Instruct-iter-2 / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984301412872841,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006279434850863423,
      "grad_norm": 17.524188433318294,
      "learning_rate": 1.875e-08,
      "logits/chosen": 0.050171270966529846,
      "logits/rejected": 0.7975481748580933,
      "logps/chosen": -213.99826049804688,
      "logps/pi_response": -122.33531951904297,
      "logps/ref_response": -122.33531951904297,
      "logps/rejected": -327.0420227050781,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06279434850863422,
      "grad_norm": 23.45193685304271,
      "learning_rate": 1.875e-07,
      "logits/chosen": 0.6251156330108643,
      "logits/rejected": 0.9096591472625732,
      "logps/chosen": -268.11822509765625,
      "logps/pi_response": -114.67009735107422,
      "logps/ref_response": -114.69570922851562,
      "logps/rejected": -390.1163330078125,
      "loss": 0.6924,
      "rewards/accuracies": 0.5416666865348816,
      "rewards/chosen": -0.000606474990490824,
      "rewards/margins": 0.0019376208074390888,
      "rewards/rejected": -0.0025440959725528955,
      "step": 10
    },
    {
      "epoch": 0.12558869701726844,
      "grad_norm": 19.551172670207706,
      "learning_rate": 2.9942119880575817e-07,
      "logits/chosen": 0.4843064248561859,
      "logits/rejected": 0.8785603642463684,
      "logps/chosen": -269.538330078125,
      "logps/pi_response": -121.35018157958984,
      "logps/ref_response": -121.34931945800781,
      "logps/rejected": -417.247314453125,
      "loss": 0.6747,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.0313839390873909,
      "rewards/margins": 0.04394400864839554,
      "rewards/rejected": -0.07532794773578644,
      "step": 20
    },
    {
      "epoch": 0.18838304552590268,
      "grad_norm": 13.701658129429845,
      "learning_rate": 2.929608750821129e-07,
      "logits/chosen": 0.5343824625015259,
      "logits/rejected": 0.9908145666122437,
      "logps/chosen": -310.638671875,
      "logps/pi_response": -118.42143249511719,
      "logps/ref_response": -117.36665344238281,
      "logps/rejected": -428.20166015625,
      "loss": 0.6175,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.18561159074306488,
      "rewards/margins": 0.17133654654026031,
      "rewards/rejected": -0.3569481372833252,
      "step": 30
    },
    {
      "epoch": 0.25117739403453687,
      "grad_norm": 14.693007375551543,
      "learning_rate": 2.7962832564252725e-07,
      "logits/chosen": 0.5080328583717346,
      "logits/rejected": 0.9311200976371765,
      "logps/chosen": -303.4456481933594,
      "logps/pi_response": -121.04774475097656,
      "logps/ref_response": -117.7525634765625,
      "logps/rejected": -484.09552001953125,
      "loss": 0.5785,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.3230215311050415,
      "rewards/margins": 0.4571772515773773,
      "rewards/rejected": -0.7801988124847412,
      "step": 40
    },
    {
      "epoch": 0.3139717425431711,
      "grad_norm": 18.087271424489508,
      "learning_rate": 2.6006445513357056e-07,
      "logits/chosen": 0.6870445013046265,
      "logits/rejected": 0.9769166111946106,
      "logps/chosen": -342.7091369628906,
      "logps/pi_response": -124.7760009765625,
      "logps/ref_response": -115.9255599975586,
      "logps/rejected": -495.4518127441406,
      "loss": 0.5714,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.712685227394104,
      "rewards/margins": 0.5671727657318115,
      "rewards/rejected": -1.2798579931259155,
      "step": 50
    },
    {
      "epoch": 0.37676609105180536,
      "grad_norm": 11.166951031052399,
      "learning_rate": 2.3520971200967334e-07,
      "logits/chosen": 0.5992540121078491,
      "logits/rejected": 1.020711898803711,
      "logps/chosen": -348.0439758300781,
      "logps/pi_response": -127.2256088256836,
      "logps/ref_response": -119.3854751586914,
      "logps/rejected": -496.8860778808594,
      "loss": 0.5649,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.5891093015670776,
      "rewards/margins": 0.5816227793693542,
      "rewards/rejected": -1.1707321405410767,
      "step": 60
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 11.470597805963651,
      "learning_rate": 2.0625888054143427e-07,
      "logits/chosen": 0.6002563238143921,
      "logits/rejected": 0.9772939682006836,
      "logps/chosen": -272.6213073730469,
      "logps/pi_response": -127.3548583984375,
      "logps/ref_response": -120.81624603271484,
      "logps/rejected": -524.6642456054688,
      "loss": 0.5372,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.36886435747146606,
      "rewards/margins": 0.7560935020446777,
      "rewards/rejected": -1.124957799911499,
      "step": 70
    },
    {
      "epoch": 0.5023547880690737,
      "grad_norm": 11.00220562972207,
      "learning_rate": 1.7460364672965327e-07,
      "logits/chosen": 0.6907710433006287,
      "logits/rejected": 1.0923134088516235,
      "logps/chosen": -269.1040954589844,
      "logps/pi_response": -112.93983459472656,
      "logps/ref_response": -106.69105529785156,
      "logps/rejected": -492.8182678222656,
      "loss": 0.5271,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4200579524040222,
      "rewards/margins": 0.6648958921432495,
      "rewards/rejected": -1.084953784942627,
      "step": 80
    },
    {
      "epoch": 0.565149136577708,
      "grad_norm": 11.111343223576297,
      "learning_rate": 1.4176569902035086e-07,
      "logits/chosen": 0.6897394061088562,
      "logits/rejected": 1.039529800415039,
      "logps/chosen": -341.63275146484375,
      "logps/pi_response": -120.40811920166016,
      "logps/ref_response": -111.32686614990234,
      "logps/rejected": -511.60565185546875,
      "loss": 0.5055,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.5421563982963562,
      "rewards/margins": 0.7095355987548828,
      "rewards/rejected": -1.2516921758651733,
      "step": 90
    },
    {
      "epoch": 0.6279434850863422,
      "grad_norm": 12.249034688194289,
      "learning_rate": 1.0932357971453743e-07,
      "logits/chosen": 0.7736852765083313,
      "logits/rejected": 1.0484180450439453,
      "logps/chosen": -290.811279296875,
      "logps/pi_response": -116.5997543334961,
      "logps/ref_response": -107.03324127197266,
      "logps/rejected": -533.04931640625,
      "loss": 0.539,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.49749264121055603,
      "rewards/margins": 0.8334757089614868,
      "rewards/rejected": -1.3309683799743652,
      "step": 100
    },
    {
      "epoch": 0.6907378335949764,
      "grad_norm": 10.864663994027794,
      "learning_rate": 7.883680337481599e-08,
      "logits/chosen": 0.7670079469680786,
      "logits/rejected": 0.9956636428833008,
      "logps/chosen": -305.14593505859375,
      "logps/pi_response": -127.0176773071289,
      "logps/ref_response": -116.74520111083984,
      "logps/rejected": -521.2225341796875,
      "loss": 0.5335,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.5058807134628296,
      "rewards/margins": 0.7484409809112549,
      "rewards/rejected": -1.2543216943740845,
      "step": 110
    },
    {
      "epoch": 0.7535321821036107,
      "grad_norm": 11.061608119367962,
      "learning_rate": 5.177088990820725e-08,
      "logits/chosen": 0.5085455775260925,
      "logits/rejected": 0.8686326146125793,
      "logps/chosen": -308.0654602050781,
      "logps/pi_response": -132.87335205078125,
      "logps/ref_response": -123.3708267211914,
      "logps/rejected": -556.9476928710938,
      "loss": 0.5259,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.4727388918399811,
      "rewards/margins": 0.8706821203231812,
      "rewards/rejected": -1.3434208631515503,
      "step": 120
    },
    {
      "epoch": 0.8163265306122449,
      "grad_norm": 13.028590054698721,
      "learning_rate": 2.942691603548416e-08,
      "logits/chosen": 0.5405811071395874,
      "logits/rejected": 1.0355885028839111,
      "logps/chosen": -320.9105224609375,
      "logps/pi_response": -135.04757690429688,
      "logps/ref_response": -127.4443588256836,
      "logps/rejected": -553.2946166992188,
      "loss": 0.5056,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.4617777466773987,
      "rewards/margins": 0.8675910234451294,
      "rewards/rejected": -1.3293688297271729,
      "step": 130
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 11.32561508697925,
      "learning_rate": 1.2878971655412513e-08,
      "logits/chosen": 0.5967448353767395,
      "logits/rejected": 0.9229636192321777,
      "logps/chosen": -296.9305725097656,
      "logps/pi_response": -139.7073974609375,
      "logps/ref_response": -129.02000427246094,
      "logps/rejected": -566.9810791015625,
      "loss": 0.5117,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.4712587893009186,
      "rewards/margins": 0.9010453224182129,
      "rewards/rejected": -1.3723042011260986,
      "step": 140
    },
    {
      "epoch": 0.9419152276295133,
      "grad_norm": 11.652819230057418,
      "learning_rate": 2.922527618666465e-09,
      "logits/chosen": 0.5724425315856934,
      "logits/rejected": 0.9267145395278931,
      "logps/chosen": -312.5321350097656,
      "logps/pi_response": -124.59476470947266,
      "logps/ref_response": -114.87628173828125,
      "logps/rejected": -503.21697998046875,
      "loss": 0.5304,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.5373696088790894,
      "rewards/margins": 0.6669692993164062,
      "rewards/rejected": -1.204338788986206,
      "step": 150
    },
    {
      "epoch": 0.9984301412872841,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.557155561147246,
      "train_runtime": 4462.021,
      "train_samples_per_second": 4.567,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
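
For reference, a minimal sketch of how the training log above could be inspected. This is an illustrative addition, not part of the saved trainer state, and it assumes the file has been downloaded locally as trainer_state.json:

import json

# Assumption: the JSON above was saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# Each regular entry in log_history records the metrics logged every 10 steps:
# loss, reward margins, log-probabilities, and so on.
for entry in state["log_history"]:
    if "loss" in entry:  # the final entry holds aggregate run statistics instead
        print(f"step {entry['step']:>3}  loss {entry['loss']:.4f}  "
              f"reward margin {entry['rewards/margins']:.4f}")

# The last entry summarizes the whole run (train_loss, train_runtime, ...).
print(state["log_history"][-1])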