{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7172314864622557,
"eval_steps": 2000,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.9492120146751404,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.9769,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 1.344916582107544,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.0718,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 0.6680417060852051,
"learning_rate": 3e-06,
"loss": 0.8901,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 0.8369797468185425,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9398,
"step": 40
},
{
"epoch": 0.02,
"grad_norm": 0.5738756656646729,
"learning_rate": 5e-06,
"loss": 0.9152,
"step": 50
},
{
"epoch": 0.02,
"grad_norm": 1.028475284576416,
"learning_rate": 6e-06,
"loss": 0.849,
"step": 60
},
{
"epoch": 0.03,
"grad_norm": 1.2893372774124146,
"learning_rate": 7e-06,
"loss": 0.7312,
"step": 70
},
{
"epoch": 0.03,
"grad_norm": 0.8779547810554504,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6707,
"step": 80
},
{
"epoch": 0.03,
"grad_norm": 0.8972748517990112,
"learning_rate": 9e-06,
"loss": 0.6413,
"step": 90
},
{
"epoch": 0.04,
"grad_norm": 1.505399227142334,
"learning_rate": 1e-05,
"loss": 0.6129,
"step": 100
},
{
"epoch": 0.04,
"grad_norm": 0.6442121267318726,
"learning_rate": 9.989898989898991e-06,
"loss": 0.5579,
"step": 110
},
{
"epoch": 0.04,
"grad_norm": 0.623290479183197,
"learning_rate": 9.97979797979798e-06,
"loss": 0.6022,
"step": 120
},
{
"epoch": 0.05,
"grad_norm": 0.7216657996177673,
"learning_rate": 9.96969696969697e-06,
"loss": 0.5481,
"step": 130
},
{
"epoch": 0.05,
"grad_norm": 0.7031328678131104,
"learning_rate": 9.95959595959596e-06,
"loss": 0.5594,
"step": 140
},
{
"epoch": 0.05,
"grad_norm": 0.7411965727806091,
"learning_rate": 9.94949494949495e-06,
"loss": 0.6241,
"step": 150
},
{
"epoch": 0.06,
"grad_norm": 0.8735277652740479,
"learning_rate": 9.939393939393939e-06,
"loss": 0.498,
"step": 160
},
{
"epoch": 0.06,
"grad_norm": 1.1062073707580566,
"learning_rate": 9.92929292929293e-06,
"loss": 0.6463,
"step": 170
},
{
"epoch": 0.06,
"grad_norm": 0.7930333614349365,
"learning_rate": 9.91919191919192e-06,
"loss": 0.6231,
"step": 180
},
{
"epoch": 0.07,
"grad_norm": 0.8774802684783936,
"learning_rate": 9.90909090909091e-06,
"loss": 0.5895,
"step": 190
},
{
"epoch": 0.07,
"grad_norm": 0.929542064666748,
"learning_rate": 9.8989898989899e-06,
"loss": 0.632,
"step": 200
},
{
"epoch": 0.08,
"grad_norm": 0.6024735569953918,
"learning_rate": 9.88888888888889e-06,
"loss": 0.5682,
"step": 210
},
{
"epoch": 0.08,
"grad_norm": 0.5887441635131836,
"learning_rate": 9.87878787878788e-06,
"loss": 0.5192,
"step": 220
},
{
"epoch": 0.08,
"grad_norm": 0.7813496589660645,
"learning_rate": 9.86868686868687e-06,
"loss": 0.5841,
"step": 230
},
{
"epoch": 0.09,
"grad_norm": 0.5949407815933228,
"learning_rate": 9.85858585858586e-06,
"loss": 0.5263,
"step": 240
},
{
"epoch": 0.09,
"grad_norm": 0.8114556670188904,
"learning_rate": 9.84848484848485e-06,
"loss": 0.5513,
"step": 250
},
{
"epoch": 0.09,
"grad_norm": 0.7692855596542358,
"learning_rate": 9.838383838383839e-06,
"loss": 0.5217,
"step": 260
},
{
"epoch": 0.1,
"grad_norm": 0.8319826126098633,
"learning_rate": 9.828282828282829e-06,
"loss": 0.4386,
"step": 270
},
{
"epoch": 0.1,
"grad_norm": 0.6725042462348938,
"learning_rate": 9.81818181818182e-06,
"loss": 0.4919,
"step": 280
},
{
"epoch": 0.1,
"grad_norm": 0.779315173625946,
"learning_rate": 9.80808080808081e-06,
"loss": 0.5281,
"step": 290
},
{
"epoch": 0.11,
"grad_norm": 0.8005223274230957,
"learning_rate": 9.797979797979798e-06,
"loss": 0.5206,
"step": 300
},
{
"epoch": 0.11,
"grad_norm": 0.6395801901817322,
"learning_rate": 9.787878787878788e-06,
"loss": 0.474,
"step": 310
},
{
"epoch": 0.11,
"grad_norm": 1.1016992330551147,
"learning_rate": 9.777777777777779e-06,
"loss": 0.5403,
"step": 320
},
{
"epoch": 0.12,
"grad_norm": 1.395189881324768,
"learning_rate": 9.767676767676767e-06,
"loss": 0.5181,
"step": 330
},
{
"epoch": 0.12,
"grad_norm": 0.801499605178833,
"learning_rate": 9.757575757575758e-06,
"loss": 0.5296,
"step": 340
},
{
"epoch": 0.13,
"grad_norm": 0.7606889605522156,
"learning_rate": 9.747474747474748e-06,
"loss": 0.5406,
"step": 350
},
{
"epoch": 0.13,
"grad_norm": 0.580170750617981,
"learning_rate": 9.737373737373738e-06,
"loss": 0.541,
"step": 360
},
{
"epoch": 0.13,
"grad_norm": 0.6730871796607971,
"learning_rate": 9.727272727272728e-06,
"loss": 0.5665,
"step": 370
},
{
"epoch": 0.14,
"grad_norm": 1.240430474281311,
"learning_rate": 9.717171717171719e-06,
"loss": 0.5148,
"step": 380
},
{
"epoch": 0.14,
"grad_norm": 0.9439683556556702,
"learning_rate": 9.707070707070709e-06,
"loss": 0.513,
"step": 390
},
{
"epoch": 0.14,
"grad_norm": 0.6674547791481018,
"learning_rate": 9.696969696969698e-06,
"loss": 0.5202,
"step": 400
},
{
"epoch": 0.15,
"grad_norm": 0.8516045808792114,
"learning_rate": 9.686868686868688e-06,
"loss": 0.5162,
"step": 410
},
{
"epoch": 0.15,
"grad_norm": 0.8700432181358337,
"learning_rate": 9.676767676767678e-06,
"loss": 0.5392,
"step": 420
},
{
"epoch": 0.15,
"grad_norm": 0.5687388777732849,
"learning_rate": 9.666666666666667e-06,
"loss": 0.5106,
"step": 430
},
{
"epoch": 0.16,
"grad_norm": 1.2382631301879883,
"learning_rate": 9.656565656565657e-06,
"loss": 0.5074,
"step": 440
},
{
"epoch": 0.16,
"grad_norm": 0.8921974301338196,
"learning_rate": 9.646464646464647e-06,
"loss": 0.561,
"step": 450
},
{
"epoch": 0.16,
"grad_norm": 1.3508048057556152,
"learning_rate": 9.636363636363638e-06,
"loss": 0.5484,
"step": 460
},
{
"epoch": 0.17,
"grad_norm": 1.1822205781936646,
"learning_rate": 9.626262626262626e-06,
"loss": 0.5719,
"step": 470
},
{
"epoch": 0.17,
"grad_norm": 0.6617271304130554,
"learning_rate": 9.616161616161616e-06,
"loss": 0.535,
"step": 480
},
{
"epoch": 0.18,
"grad_norm": 0.6614571809768677,
"learning_rate": 9.606060606060607e-06,
"loss": 0.4549,
"step": 490
},
{
"epoch": 0.18,
"grad_norm": 0.8642953038215637,
"learning_rate": 9.595959595959597e-06,
"loss": 0.4789,
"step": 500
},
{
"epoch": 0.18,
"grad_norm": 0.614743173122406,
"learning_rate": 9.585858585858586e-06,
"loss": 0.4854,
"step": 510
},
{
"epoch": 0.19,
"grad_norm": 0.970829427242279,
"learning_rate": 9.575757575757576e-06,
"loss": 0.5196,
"step": 520
},
{
"epoch": 0.19,
"grad_norm": 0.7311980128288269,
"learning_rate": 9.565656565656566e-06,
"loss": 0.5106,
"step": 530
},
{
"epoch": 0.19,
"grad_norm": 0.765849769115448,
"learning_rate": 9.555555555555556e-06,
"loss": 0.5782,
"step": 540
},
{
"epoch": 0.2,
"grad_norm": 1.0889312028884888,
"learning_rate": 9.545454545454547e-06,
"loss": 0.5824,
"step": 550
},
{
"epoch": 0.2,
"grad_norm": 0.7402384877204895,
"learning_rate": 9.535353535353537e-06,
"loss": 0.5005,
"step": 560
},
{
"epoch": 0.2,
"grad_norm": 0.707028329372406,
"learning_rate": 9.525252525252526e-06,
"loss": 0.5233,
"step": 570
},
{
"epoch": 0.21,
"grad_norm": 0.8338315486907959,
"learning_rate": 9.515151515151516e-06,
"loss": 0.4694,
"step": 580
},
{
"epoch": 0.21,
"grad_norm": 0.7450662851333618,
"learning_rate": 9.505050505050506e-06,
"loss": 0.4762,
"step": 590
},
{
"epoch": 0.22,
"grad_norm": 0.7595840692520142,
"learning_rate": 9.494949494949497e-06,
"loss": 0.5018,
"step": 600
},
{
"epoch": 0.22,
"grad_norm": 0.5880123376846313,
"learning_rate": 9.484848484848485e-06,
"loss": 0.5228,
"step": 610
},
{
"epoch": 0.22,
"grad_norm": 0.9635146260261536,
"learning_rate": 9.474747474747475e-06,
"loss": 0.4987,
"step": 620
},
{
"epoch": 0.23,
"grad_norm": 1.1274373531341553,
"learning_rate": 9.464646464646466e-06,
"loss": 0.5085,
"step": 630
},
{
"epoch": 0.23,
"grad_norm": 1.1324831247329712,
"learning_rate": 9.454545454545456e-06,
"loss": 0.504,
"step": 640
},
{
"epoch": 0.23,
"grad_norm": 0.5410157442092896,
"learning_rate": 9.444444444444445e-06,
"loss": 0.4619,
"step": 650
},
{
"epoch": 0.24,
"grad_norm": 0.7583281993865967,
"learning_rate": 9.434343434343435e-06,
"loss": 0.5162,
"step": 660
},
{
"epoch": 0.24,
"grad_norm": 0.6546668410301208,
"learning_rate": 9.424242424242425e-06,
"loss": 0.4969,
"step": 670
},
{
"epoch": 0.24,
"grad_norm": 0.6070376634597778,
"learning_rate": 9.414141414141414e-06,
"loss": 0.4805,
"step": 680
},
{
"epoch": 0.25,
"grad_norm": 1.0108693838119507,
"learning_rate": 9.404040404040404e-06,
"loss": 0.4808,
"step": 690
},
{
"epoch": 0.25,
"grad_norm": 0.8799183368682861,
"learning_rate": 9.393939393939396e-06,
"loss": 0.5082,
"step": 700
},
{
"epoch": 0.25,
"grad_norm": 1.3432070016860962,
"learning_rate": 9.383838383838385e-06,
"loss": 0.4353,
"step": 710
},
{
"epoch": 0.26,
"grad_norm": 0.6518195271492004,
"learning_rate": 9.373737373737375e-06,
"loss": 0.4933,
"step": 720
},
{
"epoch": 0.26,
"grad_norm": 0.6736329793930054,
"learning_rate": 9.363636363636365e-06,
"loss": 0.5342,
"step": 730
},
{
"epoch": 0.27,
"grad_norm": 0.776785135269165,
"learning_rate": 9.353535353535354e-06,
"loss": 0.5162,
"step": 740
},
{
"epoch": 0.27,
"grad_norm": 0.9443957805633545,
"learning_rate": 9.343434343434344e-06,
"loss": 0.4486,
"step": 750
},
{
"epoch": 0.27,
"grad_norm": 0.8882728815078735,
"learning_rate": 9.333333333333334e-06,
"loss": 0.4774,
"step": 760
},
{
"epoch": 0.28,
"grad_norm": 0.819239616394043,
"learning_rate": 9.323232323232325e-06,
"loss": 0.5,
"step": 770
},
{
"epoch": 0.28,
"grad_norm": 0.8829065561294556,
"learning_rate": 9.313131313131313e-06,
"loss": 0.4655,
"step": 780
},
{
"epoch": 0.28,
"grad_norm": 1.1993345022201538,
"learning_rate": 9.303030303030303e-06,
"loss": 0.5392,
"step": 790
},
{
"epoch": 0.29,
"grad_norm": 0.681409478187561,
"learning_rate": 9.292929292929294e-06,
"loss": 0.5076,
"step": 800
},
{
"epoch": 0.29,
"grad_norm": 1.075088381767273,
"learning_rate": 9.282828282828284e-06,
"loss": 0.4953,
"step": 810
},
{
"epoch": 0.29,
"grad_norm": 0.7040195465087891,
"learning_rate": 9.272727272727273e-06,
"loss": 0.5596,
"step": 820
},
{
"epoch": 0.3,
"grad_norm": 1.1210192441940308,
"learning_rate": 9.262626262626263e-06,
"loss": 0.5299,
"step": 830
},
{
"epoch": 0.3,
"grad_norm": 0.583011269569397,
"learning_rate": 9.252525252525253e-06,
"loss": 0.497,
"step": 840
},
{
"epoch": 0.3,
"grad_norm": 0.6583887338638306,
"learning_rate": 9.242424242424244e-06,
"loss": 0.5556,
"step": 850
},
{
"epoch": 0.31,
"grad_norm": 0.8040810227394104,
"learning_rate": 9.232323232323232e-06,
"loss": 0.5257,
"step": 860
},
{
"epoch": 0.31,
"grad_norm": 0.9269919991493225,
"learning_rate": 9.222222222222224e-06,
"loss": 0.4421,
"step": 870
},
{
"epoch": 0.32,
"grad_norm": 0.9947918653488159,
"learning_rate": 9.212121212121213e-06,
"loss": 0.5297,
"step": 880
},
{
"epoch": 0.32,
"grad_norm": 0.6900811791419983,
"learning_rate": 9.202020202020203e-06,
"loss": 0.4833,
"step": 890
},
{
"epoch": 0.32,
"grad_norm": 0.8033557534217834,
"learning_rate": 9.191919191919193e-06,
"loss": 0.4894,
"step": 900
},
{
"epoch": 0.33,
"grad_norm": 0.635124683380127,
"learning_rate": 9.181818181818184e-06,
"loss": 0.4554,
"step": 910
},
{
"epoch": 0.33,
"grad_norm": 0.7293840646743774,
"learning_rate": 9.171717171717172e-06,
"loss": 0.4693,
"step": 920
},
{
"epoch": 0.33,
"grad_norm": 0.7628031373023987,
"learning_rate": 9.161616161616162e-06,
"loss": 0.5181,
"step": 930
},
{
"epoch": 0.34,
"grad_norm": 1.0783181190490723,
"learning_rate": 9.151515151515153e-06,
"loss": 0.4632,
"step": 940
},
{
"epoch": 0.34,
"grad_norm": 0.5340379476547241,
"learning_rate": 9.141414141414143e-06,
"loss": 0.4664,
"step": 950
},
{
"epoch": 0.34,
"grad_norm": 0.9029551148414612,
"learning_rate": 9.131313131313132e-06,
"loss": 0.5333,
"step": 960
},
{
"epoch": 0.35,
"grad_norm": 0.7257616519927979,
"learning_rate": 9.121212121212122e-06,
"loss": 0.5168,
"step": 970
},
{
"epoch": 0.35,
"grad_norm": 0.761325478553772,
"learning_rate": 9.111111111111112e-06,
"loss": 0.5606,
"step": 980
},
{
"epoch": 0.36,
"grad_norm": 0.8582245707511902,
"learning_rate": 9.1010101010101e-06,
"loss": 0.4332,
"step": 990
},
{
"epoch": 0.36,
"grad_norm": 0.8598415851593018,
"learning_rate": 9.090909090909091e-06,
"loss": 0.5884,
"step": 1000
},
{
"epoch": 0.36,
"grad_norm": 0.8292351365089417,
"learning_rate": 9.080808080808081e-06,
"loss": 0.4848,
"step": 1010
},
{
"epoch": 0.37,
"grad_norm": 1.0559266805648804,
"learning_rate": 9.070707070707072e-06,
"loss": 0.4588,
"step": 1020
},
{
"epoch": 0.37,
"grad_norm": 0.6693033576011658,
"learning_rate": 9.06060606060606e-06,
"loss": 0.5333,
"step": 1030
},
{
"epoch": 0.37,
"grad_norm": 0.8114706873893738,
"learning_rate": 9.050505050505052e-06,
"loss": 0.5166,
"step": 1040
},
{
"epoch": 0.38,
"grad_norm": 0.8659316301345825,
"learning_rate": 9.040404040404042e-06,
"loss": 0.4504,
"step": 1050
},
{
"epoch": 0.38,
"grad_norm": 0.9083582758903503,
"learning_rate": 9.030303030303031e-06,
"loss": 0.5611,
"step": 1060
},
{
"epoch": 0.38,
"grad_norm": 0.6691566109657288,
"learning_rate": 9.020202020202021e-06,
"loss": 0.5192,
"step": 1070
},
{
"epoch": 0.39,
"grad_norm": 0.5889317989349365,
"learning_rate": 9.010101010101012e-06,
"loss": 0.4515,
"step": 1080
},
{
"epoch": 0.39,
"grad_norm": 0.9215373992919922,
"learning_rate": 9e-06,
"loss": 0.4776,
"step": 1090
},
{
"epoch": 0.39,
"grad_norm": 0.7439729571342468,
"learning_rate": 8.98989898989899e-06,
"loss": 0.4656,
"step": 1100
},
{
"epoch": 0.4,
"grad_norm": 1.1780657768249512,
"learning_rate": 8.97979797979798e-06,
"loss": 0.4933,
"step": 1110
},
{
"epoch": 0.4,
"grad_norm": 0.9686077833175659,
"learning_rate": 8.969696969696971e-06,
"loss": 0.5167,
"step": 1120
},
{
"epoch": 0.41,
"grad_norm": 0.829994261264801,
"learning_rate": 8.95959595959596e-06,
"loss": 0.491,
"step": 1130
},
{
"epoch": 0.41,
"grad_norm": 0.6313827633857727,
"learning_rate": 8.94949494949495e-06,
"loss": 0.4864,
"step": 1140
},
{
"epoch": 0.41,
"grad_norm": 0.596537709236145,
"learning_rate": 8.93939393939394e-06,
"loss": 0.4807,
"step": 1150
},
{
"epoch": 0.42,
"grad_norm": 0.8714896440505981,
"learning_rate": 8.92929292929293e-06,
"loss": 0.512,
"step": 1160
},
{
"epoch": 0.42,
"grad_norm": 0.9466399550437927,
"learning_rate": 8.919191919191919e-06,
"loss": 0.4883,
"step": 1170
},
{
"epoch": 0.42,
"grad_norm": 0.7337993383407593,
"learning_rate": 8.90909090909091e-06,
"loss": 0.4757,
"step": 1180
},
{
"epoch": 0.43,
"grad_norm": 0.7684504985809326,
"learning_rate": 8.8989898989899e-06,
"loss": 0.5224,
"step": 1190
},
{
"epoch": 0.43,
"grad_norm": 0.6455299854278564,
"learning_rate": 8.888888888888888e-06,
"loss": 0.5346,
"step": 1200
},
{
"epoch": 0.43,
"grad_norm": 0.7279661893844604,
"learning_rate": 8.87878787878788e-06,
"loss": 0.4845,
"step": 1210
},
{
"epoch": 0.44,
"grad_norm": 0.7996425032615662,
"learning_rate": 8.86868686868687e-06,
"loss": 0.4812,
"step": 1220
},
{
"epoch": 0.44,
"grad_norm": 0.7299336791038513,
"learning_rate": 8.85858585858586e-06,
"loss": 0.4985,
"step": 1230
},
{
"epoch": 0.44,
"grad_norm": 0.7462379932403564,
"learning_rate": 8.84848484848485e-06,
"loss": 0.5742,
"step": 1240
},
{
"epoch": 0.45,
"grad_norm": 0.7165307998657227,
"learning_rate": 8.83838383838384e-06,
"loss": 0.4627,
"step": 1250
},
{
"epoch": 0.45,
"grad_norm": 0.7239411473274231,
"learning_rate": 8.82828282828283e-06,
"loss": 0.5469,
"step": 1260
},
{
"epoch": 0.46,
"grad_norm": 0.5761345028877258,
"learning_rate": 8.818181818181819e-06,
"loss": 0.4391,
"step": 1270
},
{
"epoch": 0.46,
"grad_norm": 0.8207817077636719,
"learning_rate": 8.808080808080809e-06,
"loss": 0.5477,
"step": 1280
},
{
"epoch": 0.46,
"grad_norm": 0.9331930875778198,
"learning_rate": 8.7979797979798e-06,
"loss": 0.4975,
"step": 1290
},
{
"epoch": 0.47,
"grad_norm": 1.0401968955993652,
"learning_rate": 8.787878787878788e-06,
"loss": 0.5149,
"step": 1300
},
{
"epoch": 0.47,
"grad_norm": 0.7848596572875977,
"learning_rate": 8.777777777777778e-06,
"loss": 0.5448,
"step": 1310
},
{
"epoch": 0.47,
"grad_norm": 1.5979989767074585,
"learning_rate": 8.767676767676768e-06,
"loss": 0.4861,
"step": 1320
},
{
"epoch": 0.48,
"grad_norm": 0.7734145522117615,
"learning_rate": 8.757575757575759e-06,
"loss": 0.5955,
"step": 1330
},
{
"epoch": 0.48,
"grad_norm": 0.9506089687347412,
"learning_rate": 8.747474747474747e-06,
"loss": 0.5212,
"step": 1340
},
{
"epoch": 0.48,
"grad_norm": 0.7118907570838928,
"learning_rate": 8.737373737373738e-06,
"loss": 0.474,
"step": 1350
},
{
"epoch": 0.49,
"grad_norm": 1.0045005083084106,
"learning_rate": 8.727272727272728e-06,
"loss": 0.509,
"step": 1360
},
{
"epoch": 0.49,
"grad_norm": 1.1283302307128906,
"learning_rate": 8.717171717171718e-06,
"loss": 0.5096,
"step": 1370
},
{
"epoch": 0.49,
"grad_norm": 1.3884085416793823,
"learning_rate": 8.707070707070707e-06,
"loss": 0.5008,
"step": 1380
},
{
"epoch": 0.5,
"grad_norm": 0.7452566027641296,
"learning_rate": 8.696969696969699e-06,
"loss": 0.4792,
"step": 1390
},
{
"epoch": 0.5,
"grad_norm": 1.3434414863586426,
"learning_rate": 8.686868686868687e-06,
"loss": 0.5385,
"step": 1400
},
{
"epoch": 0.51,
"grad_norm": 0.9600369930267334,
"learning_rate": 8.676767676767678e-06,
"loss": 0.4743,
"step": 1410
},
{
"epoch": 0.51,
"grad_norm": 0.6895599961280823,
"learning_rate": 8.666666666666668e-06,
"loss": 0.5639,
"step": 1420
},
{
"epoch": 0.51,
"grad_norm": 0.9460670948028564,
"learning_rate": 8.656565656565658e-06,
"loss": 0.5957,
"step": 1430
},
{
"epoch": 0.52,
"grad_norm": 0.6181186437606812,
"learning_rate": 8.646464646464647e-06,
"loss": 0.5032,
"step": 1440
},
{
"epoch": 0.52,
"grad_norm": 0.6992838382720947,
"learning_rate": 8.636363636363637e-06,
"loss": 0.5371,
"step": 1450
},
{
"epoch": 0.52,
"grad_norm": 0.6007195711135864,
"learning_rate": 8.626262626262627e-06,
"loss": 0.4446,
"step": 1460
},
{
"epoch": 0.53,
"grad_norm": 0.8171485662460327,
"learning_rate": 8.616161616161618e-06,
"loss": 0.4835,
"step": 1470
},
{
"epoch": 0.53,
"grad_norm": 0.9369902610778809,
"learning_rate": 8.606060606060606e-06,
"loss": 0.5437,
"step": 1480
},
{
"epoch": 0.53,
"grad_norm": 1.1894652843475342,
"learning_rate": 8.595959595959596e-06,
"loss": 0.5151,
"step": 1490
},
{
"epoch": 0.54,
"grad_norm": 0.694837212562561,
"learning_rate": 8.585858585858587e-06,
"loss": 0.488,
"step": 1500
},
{
"epoch": 0.54,
"grad_norm": 0.8813522458076477,
"learning_rate": 8.575757575757575e-06,
"loss": 0.5129,
"step": 1510
},
{
"epoch": 0.55,
"grad_norm": 1.0121350288391113,
"learning_rate": 8.565656565656566e-06,
"loss": 0.5165,
"step": 1520
},
{
"epoch": 0.55,
"grad_norm": 0.7935439944267273,
"learning_rate": 8.555555555555556e-06,
"loss": 0.4736,
"step": 1530
},
{
"epoch": 0.55,
"grad_norm": 0.7670463919639587,
"learning_rate": 8.545454545454546e-06,
"loss": 0.5214,
"step": 1540
},
{
"epoch": 0.56,
"grad_norm": 1.212927222251892,
"learning_rate": 8.535353535353535e-06,
"loss": 0.5125,
"step": 1550
},
{
"epoch": 0.56,
"grad_norm": 0.7966919541358948,
"learning_rate": 8.525252525252527e-06,
"loss": 0.4823,
"step": 1560
},
{
"epoch": 0.56,
"grad_norm": 1.0880494117736816,
"learning_rate": 8.515151515151517e-06,
"loss": 0.478,
"step": 1570
},
{
"epoch": 0.57,
"grad_norm": 1.0308737754821777,
"learning_rate": 8.505050505050506e-06,
"loss": 0.5368,
"step": 1580
},
{
"epoch": 0.57,
"grad_norm": 0.7291275262832642,
"learning_rate": 8.494949494949496e-06,
"loss": 0.4838,
"step": 1590
},
{
"epoch": 0.57,
"grad_norm": 0.6764214038848877,
"learning_rate": 8.484848484848486e-06,
"loss": 0.4882,
"step": 1600
},
{
"epoch": 0.58,
"grad_norm": 1.051628828048706,
"learning_rate": 8.474747474747475e-06,
"loss": 0.4564,
"step": 1610
},
{
"epoch": 0.58,
"grad_norm": 0.8614614605903625,
"learning_rate": 8.464646464646465e-06,
"loss": 0.5632,
"step": 1620
},
{
"epoch": 0.58,
"grad_norm": 1.1045228242874146,
"learning_rate": 8.454545454545455e-06,
"loss": 0.4535,
"step": 1630
},
{
"epoch": 0.59,
"grad_norm": 0.8160364031791687,
"learning_rate": 8.444444444444446e-06,
"loss": 0.4964,
"step": 1640
},
{
"epoch": 0.59,
"grad_norm": 0.7776429653167725,
"learning_rate": 8.434343434343434e-06,
"loss": 0.4554,
"step": 1650
},
{
"epoch": 0.6,
"grad_norm": 0.7589672207832336,
"learning_rate": 8.424242424242425e-06,
"loss": 0.4458,
"step": 1660
},
{
"epoch": 0.6,
"grad_norm": 0.825233519077301,
"learning_rate": 8.414141414141415e-06,
"loss": 0.4814,
"step": 1670
},
{
"epoch": 0.6,
"grad_norm": 0.8226912617683411,
"learning_rate": 8.404040404040405e-06,
"loss": 0.4892,
"step": 1680
},
{
"epoch": 0.61,
"grad_norm": 0.9273412823677063,
"learning_rate": 8.393939393939394e-06,
"loss": 0.5143,
"step": 1690
},
{
"epoch": 0.61,
"grad_norm": 0.9043828248977661,
"learning_rate": 8.383838383838384e-06,
"loss": 0.4598,
"step": 1700
},
{
"epoch": 0.61,
"grad_norm": 2.2596805095672607,
"learning_rate": 8.373737373737374e-06,
"loss": 0.5415,
"step": 1710
},
{
"epoch": 0.62,
"grad_norm": 1.0041202306747437,
"learning_rate": 8.363636363636365e-06,
"loss": 0.5899,
"step": 1720
},
{
"epoch": 0.62,
"grad_norm": 0.9188370704650879,
"learning_rate": 8.353535353535355e-06,
"loss": 0.5119,
"step": 1730
},
{
"epoch": 0.62,
"grad_norm": 0.7778961062431335,
"learning_rate": 8.343434343434345e-06,
"loss": 0.5237,
"step": 1740
},
{
"epoch": 0.63,
"grad_norm": 0.7438649535179138,
"learning_rate": 8.333333333333334e-06,
"loss": 0.4999,
"step": 1750
},
{
"epoch": 0.63,
"grad_norm": 0.5649489760398865,
"learning_rate": 8.323232323232324e-06,
"loss": 0.552,
"step": 1760
},
{
"epoch": 0.63,
"grad_norm": 0.5625451803207397,
"learning_rate": 8.313131313131314e-06,
"loss": 0.4549,
"step": 1770
},
{
"epoch": 0.64,
"grad_norm": 1.3711755275726318,
"learning_rate": 8.303030303030305e-06,
"loss": 0.445,
"step": 1780
},
{
"epoch": 0.64,
"grad_norm": 1.4339165687561035,
"learning_rate": 8.292929292929293e-06,
"loss": 0.4975,
"step": 1790
},
{
"epoch": 0.65,
"grad_norm": 0.8113200068473816,
"learning_rate": 8.282828282828283e-06,
"loss": 0.5288,
"step": 1800
},
{
"epoch": 0.65,
"grad_norm": 1.1567124128341675,
"learning_rate": 8.272727272727274e-06,
"loss": 0.4669,
"step": 1810
},
{
"epoch": 0.65,
"grad_norm": 0.7966761589050293,
"learning_rate": 8.262626262626264e-06,
"loss": 0.4856,
"step": 1820
},
{
"epoch": 0.66,
"grad_norm": 1.0181186199188232,
"learning_rate": 8.252525252525253e-06,
"loss": 0.4576,
"step": 1830
},
{
"epoch": 0.66,
"grad_norm": 0.611566960811615,
"learning_rate": 8.242424242424243e-06,
"loss": 0.496,
"step": 1840
},
{
"epoch": 0.66,
"grad_norm": 0.6482832431793213,
"learning_rate": 8.232323232323233e-06,
"loss": 0.4601,
"step": 1850
},
{
"epoch": 0.67,
"grad_norm": 0.7550622820854187,
"learning_rate": 8.222222222222222e-06,
"loss": 0.5036,
"step": 1860
},
{
"epoch": 0.67,
"grad_norm": 0.7835694551467896,
"learning_rate": 8.212121212121212e-06,
"loss": 0.5617,
"step": 1870
},
{
"epoch": 0.67,
"grad_norm": 0.7926068305969238,
"learning_rate": 8.202020202020202e-06,
"loss": 0.4327,
"step": 1880
},
{
"epoch": 0.68,
"grad_norm": 0.786851167678833,
"learning_rate": 8.191919191919193e-06,
"loss": 0.4654,
"step": 1890
},
{
"epoch": 0.68,
"grad_norm": 0.9023171663284302,
"learning_rate": 8.181818181818183e-06,
"loss": 0.5426,
"step": 1900
},
{
"epoch": 0.68,
"grad_norm": 1.0345401763916016,
"learning_rate": 8.171717171717173e-06,
"loss": 0.52,
"step": 1910
},
{
"epoch": 0.69,
"grad_norm": 0.945004940032959,
"learning_rate": 8.161616161616162e-06,
"loss": 0.5512,
"step": 1920
},
{
"epoch": 0.69,
"grad_norm": 0.658362090587616,
"learning_rate": 8.151515151515152e-06,
"loss": 0.513,
"step": 1930
},
{
"epoch": 0.7,
"grad_norm": 0.6390058398246765,
"learning_rate": 8.141414141414142e-06,
"loss": 0.5652,
"step": 1940
},
{
"epoch": 0.7,
"grad_norm": 0.7705880403518677,
"learning_rate": 8.131313131313133e-06,
"loss": 0.4929,
"step": 1950
},
{
"epoch": 0.7,
"grad_norm": 0.5400047302246094,
"learning_rate": 8.121212121212121e-06,
"loss": 0.4932,
"step": 1960
},
{
"epoch": 0.71,
"grad_norm": 0.9128320217132568,
"learning_rate": 8.111111111111112e-06,
"loss": 0.5085,
"step": 1970
},
{
"epoch": 0.71,
"grad_norm": 1.0019017457962036,
"learning_rate": 8.101010101010102e-06,
"loss": 0.4552,
"step": 1980
},
{
"epoch": 0.71,
"grad_norm": 0.818148136138916,
"learning_rate": 8.090909090909092e-06,
"loss": 0.5517,
"step": 1990
},
{
"epoch": 0.72,
"grad_norm": 0.8848174810409546,
"learning_rate": 8.08080808080808e-06,
"loss": 0.5094,
"step": 2000
},
{
"epoch": 0.72,
"eval_loss": 0.6485620141029358,
"eval_runtime": 340.5095,
"eval_samples_per_second": 2.937,
"eval_steps_per_second": 2.937,
"step": 2000
}
],
"logging_steps": 10,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 2000,
"total_flos": 1.63205502468096e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}