{
"best_metric": 1.177372694015503,
"best_model_checkpoint": "/root/finetuning_executions/finetuning_05_utg4java_src_fm_fc_ms_ff/checkpoint-52644",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 87740,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 2.229098081588745,
"learning_rate": 2.4750000000000002e-05,
"loss": 3.3229,
"step": 400
},
{
"epoch": 0.05,
"grad_norm": 1.5058149099349976,
"learning_rate": 4.975e-05,
"loss": 1.748,
"step": 800
},
{
"epoch": 0.07,
"grad_norm": 1.120527982711792,
"learning_rate": 4.977225672877847e-05,
"loss": 1.5462,
"step": 1200
},
{
"epoch": 0.09,
"grad_norm": 1.5305182933807373,
"learning_rate": 4.9542213020473895e-05,
"loss": 1.4719,
"step": 1600
},
{
"epoch": 0.11,
"grad_norm": 1.0366812944412231,
"learning_rate": 4.931216931216932e-05,
"loss": 1.4154,
"step": 2000
},
{
"epoch": 0.14,
"grad_norm": 1.1470097303390503,
"learning_rate": 4.9082125603864734e-05,
"loss": 1.3773,
"step": 2400
},
{
"epoch": 0.16,
"grad_norm": 1.1128191947937012,
"learning_rate": 4.885208189556016e-05,
"loss": 1.3428,
"step": 2800
},
{
"epoch": 0.18,
"grad_norm": 1.253928303718567,
"learning_rate": 4.862203818725558e-05,
"loss": 1.3226,
"step": 3200
},
{
"epoch": 0.21,
"grad_norm": 1.0254652500152588,
"learning_rate": 4.8391994478951e-05,
"loss": 1.291,
"step": 3600
},
{
"epoch": 0.23,
"grad_norm": 1.1027791500091553,
"learning_rate": 4.8161950770646426e-05,
"loss": 1.2715,
"step": 4000
},
{
"epoch": 0.25,
"grad_norm": 1.1553912162780762,
"learning_rate": 4.793190706234185e-05,
"loss": 1.2662,
"step": 4400
},
{
"epoch": 0.27,
"grad_norm": 1.144874930381775,
"learning_rate": 4.770186335403727e-05,
"loss": 1.242,
"step": 4800
},
{
"epoch": 0.3,
"grad_norm": 1.1357582807540894,
"learning_rate": 4.747181964573269e-05,
"loss": 1.2392,
"step": 5200
},
{
"epoch": 0.32,
"grad_norm": 0.8985121250152588,
"learning_rate": 4.724177593742811e-05,
"loss": 1.2093,
"step": 5600
},
{
"epoch": 0.34,
"grad_norm": 0.9756821990013123,
"learning_rate": 4.7011732229123534e-05,
"loss": 1.1905,
"step": 6000
},
{
"epoch": 0.36,
"grad_norm": 1.1240946054458618,
"learning_rate": 4.678168852081896e-05,
"loss": 1.1769,
"step": 6400
},
{
"epoch": 0.39,
"grad_norm": 0.9794073700904846,
"learning_rate": 4.655164481251438e-05,
"loss": 1.1658,
"step": 6800
},
{
"epoch": 0.41,
"grad_norm": 0.977505624294281,
"learning_rate": 4.63216011042098e-05,
"loss": 1.1749,
"step": 7200
},
{
"epoch": 0.43,
"grad_norm": 1.1371684074401855,
"learning_rate": 4.609155739590522e-05,
"loss": 1.1424,
"step": 7600
},
{
"epoch": 0.46,
"grad_norm": 1.0095725059509277,
"learning_rate": 4.586151368760065e-05,
"loss": 1.1417,
"step": 8000
},
{
"epoch": 0.48,
"grad_norm": 1.0289288759231567,
"learning_rate": 4.563146997929607e-05,
"loss": 1.1408,
"step": 8400
},
{
"epoch": 0.5,
"grad_norm": 1.1291728019714355,
"learning_rate": 4.5401426270991495e-05,
"loss": 1.1243,
"step": 8800
},
{
"epoch": 0.52,
"grad_norm": 1.1059273481369019,
"learning_rate": 4.517138256268692e-05,
"loss": 1.1143,
"step": 9200
},
{
"epoch": 0.55,
"grad_norm": 0.8934476971626282,
"learning_rate": 4.4941338854382334e-05,
"loss": 1.1019,
"step": 9600
},
{
"epoch": 0.57,
"grad_norm": 1.0957863330841064,
"learning_rate": 4.471129514607776e-05,
"loss": 1.094,
"step": 10000
},
{
"epoch": 0.59,
"grad_norm": 1.2800277471542358,
"learning_rate": 4.448125143777318e-05,
"loss": 1.0773,
"step": 10400
},
{
"epoch": 0.62,
"grad_norm": 0.9538272619247437,
"learning_rate": 4.42512077294686e-05,
"loss": 1.0952,
"step": 10800
},
{
"epoch": 0.64,
"grad_norm": 0.9216693639755249,
"learning_rate": 4.4021164021164026e-05,
"loss": 1.0725,
"step": 11200
},
{
"epoch": 0.66,
"grad_norm": 0.9263585805892944,
"learning_rate": 4.379112031285945e-05,
"loss": 1.0693,
"step": 11600
},
{
"epoch": 0.68,
"grad_norm": 0.9969348311424255,
"learning_rate": 4.3561076604554865e-05,
"loss": 1.0582,
"step": 12000
},
{
"epoch": 0.71,
"grad_norm": 0.9046760201454163,
"learning_rate": 4.333103289625029e-05,
"loss": 1.062,
"step": 12400
},
{
"epoch": 0.73,
"grad_norm": 1.0008882284164429,
"learning_rate": 4.310098918794571e-05,
"loss": 1.0676,
"step": 12800
},
{
"epoch": 0.75,
"grad_norm": 1.0041269063949585,
"learning_rate": 4.2870945479641134e-05,
"loss": 1.0428,
"step": 13200
},
{
"epoch": 0.78,
"grad_norm": 1.0424519777297974,
"learning_rate": 4.264090177133656e-05,
"loss": 1.0403,
"step": 13600
},
{
"epoch": 0.8,
"grad_norm": 0.9091747999191284,
"learning_rate": 4.241143317230274e-05,
"loss": 1.04,
"step": 14000
},
{
"epoch": 0.82,
"grad_norm": 1.0176922082901,
"learning_rate": 4.218138946399816e-05,
"loss": 1.0271,
"step": 14400
},
{
"epoch": 0.84,
"grad_norm": 1.1477534770965576,
"learning_rate": 4.1951345755693586e-05,
"loss": 1.0325,
"step": 14800
},
{
"epoch": 0.87,
"grad_norm": 0.9284051060676575,
"learning_rate": 4.1721302047389e-05,
"loss": 1.0331,
"step": 15200
},
{
"epoch": 0.89,
"grad_norm": 0.9155886173248291,
"learning_rate": 4.1491258339084425e-05,
"loss": 1.0081,
"step": 15600
},
{
"epoch": 0.91,
"grad_norm": 0.9425845146179199,
"learning_rate": 4.126121463077985e-05,
"loss": 1.0014,
"step": 16000
},
{
"epoch": 0.93,
"grad_norm": 0.9099463224411011,
"learning_rate": 4.103117092247527e-05,
"loss": 1.0117,
"step": 16400
},
{
"epoch": 0.96,
"grad_norm": 0.941250205039978,
"learning_rate": 4.0801127214170694e-05,
"loss": 1.0021,
"step": 16800
},
{
"epoch": 0.98,
"grad_norm": 0.8845394849777222,
"learning_rate": 4.0571083505866117e-05,
"loss": 0.9984,
"step": 17200
},
{
"epoch": 1.0,
"eval_loss": 1.2029205560684204,
"eval_runtime": 240.0634,
"eval_samples_per_second": 251.08,
"eval_steps_per_second": 3.924,
"step": 17548
},
{
"epoch": 1.0,
"grad_norm": 0.8803815841674805,
"learning_rate": 4.034103979756153e-05,
"loss": 0.9937,
"step": 17600
},
{
"epoch": 1.03,
"grad_norm": 0.9823230504989624,
"learning_rate": 4.0110996089256956e-05,
"loss": 0.9468,
"step": 18000
},
{
"epoch": 1.05,
"grad_norm": 0.9398742914199829,
"learning_rate": 3.9880952380952386e-05,
"loss": 0.9499,
"step": 18400
},
{
"epoch": 1.07,
"grad_norm": 1.0421141386032104,
"learning_rate": 3.965090867264781e-05,
"loss": 0.9471,
"step": 18800
},
{
"epoch": 1.09,
"grad_norm": 1.0032306909561157,
"learning_rate": 3.9422015182884744e-05,
"loss": 0.9375,
"step": 19200
},
{
"epoch": 1.12,
"grad_norm": 1.4174487590789795,
"learning_rate": 3.9191971474580174e-05,
"loss": 0.9417,
"step": 19600
},
{
"epoch": 1.14,
"grad_norm": 1.037479281425476,
"learning_rate": 3.89619277662756e-05,
"loss": 0.925,
"step": 20000
},
{
"epoch": 1.16,
"grad_norm": 1.0530787706375122,
"learning_rate": 3.873245916724178e-05,
"loss": 0.9378,
"step": 20400
},
{
"epoch": 1.19,
"grad_norm": 1.1429826021194458,
"learning_rate": 3.85024154589372e-05,
"loss": 0.9386,
"step": 20800
},
{
"epoch": 1.21,
"grad_norm": 0.8666102886199951,
"learning_rate": 3.8272371750632626e-05,
"loss": 0.9169,
"step": 21200
},
{
"epoch": 1.23,
"grad_norm": 1.0205742120742798,
"learning_rate": 3.804232804232805e-05,
"loss": 0.9247,
"step": 21600
},
{
"epoch": 1.25,
"grad_norm": 1.046938180923462,
"learning_rate": 3.7812284334023465e-05,
"loss": 0.9168,
"step": 22000
},
{
"epoch": 1.28,
"grad_norm": 1.0534473657608032,
"learning_rate": 3.758224062571889e-05,
"loss": 0.9179,
"step": 22400
},
{
"epoch": 1.3,
"grad_norm": 0.9643887877464294,
"learning_rate": 3.735219691741431e-05,
"loss": 0.9147,
"step": 22800
},
{
"epoch": 1.32,
"grad_norm": 1.162414312362671,
"learning_rate": 3.7122153209109734e-05,
"loss": 0.9252,
"step": 23200
},
{
"epoch": 1.34,
"grad_norm": 1.0543121099472046,
"learning_rate": 3.689210950080516e-05,
"loss": 0.9002,
"step": 23600
},
{
"epoch": 1.37,
"grad_norm": 1.095170021057129,
"learning_rate": 3.666206579250058e-05,
"loss": 0.9078,
"step": 24000
},
{
"epoch": 1.39,
"grad_norm": 0.938887357711792,
"learning_rate": 3.6432022084195996e-05,
"loss": 0.9074,
"step": 24400
},
{
"epoch": 1.41,
"grad_norm": 1.1190577745437622,
"learning_rate": 3.620197837589142e-05,
"loss": 0.9087,
"step": 24800
},
{
"epoch": 1.44,
"grad_norm": 0.9494956731796265,
"learning_rate": 3.597193466758684e-05,
"loss": 0.8903,
"step": 25200
},
{
"epoch": 1.46,
"grad_norm": 1.1163825988769531,
"learning_rate": 3.5741890959282265e-05,
"loss": 0.8934,
"step": 25600
},
{
"epoch": 1.48,
"grad_norm": 0.9037898182868958,
"learning_rate": 3.551184725097769e-05,
"loss": 0.9022,
"step": 26000
},
{
"epoch": 1.5,
"grad_norm": 1.0662837028503418,
"learning_rate": 3.528237865194387e-05,
"loss": 0.8859,
"step": 26400
},
{
"epoch": 1.53,
"grad_norm": 0.8364838361740112,
"learning_rate": 3.5052334943639294e-05,
"loss": 0.8829,
"step": 26800
},
{
"epoch": 1.55,
"grad_norm": 1.109578013420105,
"learning_rate": 3.482229123533472e-05,
"loss": 0.8784,
"step": 27200
},
{
"epoch": 1.57,
"grad_norm": 0.8675488829612732,
"learning_rate": 3.459224752703013e-05,
"loss": 0.8822,
"step": 27600
},
{
"epoch": 1.6,
"grad_norm": 1.089260458946228,
"learning_rate": 3.4362203818725556e-05,
"loss": 0.8864,
"step": 28000
},
{
"epoch": 1.62,
"grad_norm": 1.2143120765686035,
"learning_rate": 3.413216011042098e-05,
"loss": 0.8872,
"step": 28400
},
{
"epoch": 1.64,
"grad_norm": 0.969947099685669,
"learning_rate": 3.39021164021164e-05,
"loss": 0.8888,
"step": 28800
},
{
"epoch": 1.66,
"grad_norm": 0.9949952960014343,
"learning_rate": 3.3672647803082585e-05,
"loss": 0.8837,
"step": 29200
},
{
"epoch": 1.69,
"grad_norm": 1.0020053386688232,
"learning_rate": 3.344260409477801e-05,
"loss": 0.8832,
"step": 29600
},
{
"epoch": 1.71,
"grad_norm": 0.9824060797691345,
"learning_rate": 3.321256038647343e-05,
"loss": 0.8772,
"step": 30000
},
{
"epoch": 1.73,
"grad_norm": 0.9114558100700378,
"learning_rate": 3.2982516678168854e-05,
"loss": 0.8618,
"step": 30400
},
{
"epoch": 1.76,
"grad_norm": 1.1614044904708862,
"learning_rate": 3.275247296986428e-05,
"loss": 0.8735,
"step": 30800
},
{
"epoch": 1.78,
"grad_norm": 1.030380129814148,
"learning_rate": 3.25224292615597e-05,
"loss": 0.8657,
"step": 31200
},
{
"epoch": 1.8,
"grad_norm": 0.9505811929702759,
"learning_rate": 3.229238555325512e-05,
"loss": 0.8783,
"step": 31600
},
{
"epoch": 1.82,
"grad_norm": 1.2939780950546265,
"learning_rate": 3.2062916954221306e-05,
"loss": 0.8635,
"step": 32000
},
{
"epoch": 1.85,
"grad_norm": 1.1592987775802612,
"learning_rate": 3.183287324591673e-05,
"loss": 0.8616,
"step": 32400
},
{
"epoch": 1.87,
"grad_norm": 1.104283332824707,
"learning_rate": 3.160340464688291e-05,
"loss": 0.8613,
"step": 32800
},
{
"epoch": 1.89,
"grad_norm": 0.9907343983650208,
"learning_rate": 3.1373360938578334e-05,
"loss": 0.8623,
"step": 33200
},
{
"epoch": 1.91,
"grad_norm": 0.96369868516922,
"learning_rate": 3.114331723027376e-05,
"loss": 0.8467,
"step": 33600
},
{
"epoch": 1.94,
"grad_norm": 1.026637315750122,
"learning_rate": 3.091327352196918e-05,
"loss": 0.8563,
"step": 34000
},
{
"epoch": 1.96,
"grad_norm": 1.00987708568573,
"learning_rate": 3.0683229813664596e-05,
"loss": 0.8489,
"step": 34400
},
{
"epoch": 1.98,
"grad_norm": 1.0772453546524048,
"learning_rate": 3.0453761214630783e-05,
"loss": 0.8597,
"step": 34800
},
{
"epoch": 2.0,
"eval_loss": 1.1818490028381348,
"eval_runtime": 240.2583,
"eval_samples_per_second": 250.876,
"eval_steps_per_second": 3.921,
"step": 35096
},
{
"epoch": 2.01,
"grad_norm": 1.0454893112182617,
"learning_rate": 3.0223717506326206e-05,
"loss": 0.8354,
"step": 35200
},
{
"epoch": 2.03,
"grad_norm": 1.0722404718399048,
"learning_rate": 2.9993673798021625e-05,
"loss": 0.8027,
"step": 35600
},
{
"epoch": 2.05,
"grad_norm": 1.144631266593933,
"learning_rate": 2.9763630089717048e-05,
"loss": 0.7981,
"step": 36000
},
{
"epoch": 2.07,
"grad_norm": 1.0820245742797852,
"learning_rate": 2.953358638141247e-05,
"loss": 0.7989,
"step": 36400
},
{
"epoch": 2.1,
"grad_norm": 1.1228435039520264,
"learning_rate": 2.930354267310789e-05,
"loss": 0.7908,
"step": 36800
},
{
"epoch": 2.12,
"grad_norm": 1.0229965448379517,
"learning_rate": 2.9073498964803314e-05,
"loss": 0.792,
"step": 37200
},
{
"epoch": 2.14,
"grad_norm": 1.0550298690795898,
"learning_rate": 2.8843455256498737e-05,
"loss": 0.8046,
"step": 37600
},
{
"epoch": 2.17,
"grad_norm": 1.1629900932312012,
"learning_rate": 2.8613411548194156e-05,
"loss": 0.8019,
"step": 38000
},
{
"epoch": 2.19,
"grad_norm": 1.0293209552764893,
"learning_rate": 2.838336783988958e-05,
"loss": 0.7967,
"step": 38400
},
{
"epoch": 2.21,
"grad_norm": 1.0259321928024292,
"learning_rate": 2.8153324131585002e-05,
"loss": 0.8035,
"step": 38800
},
{
"epoch": 2.23,
"grad_norm": 1.2149560451507568,
"learning_rate": 2.7923280423280422e-05,
"loss": 0.7979,
"step": 39200
},
{
"epoch": 2.26,
"grad_norm": 1.082778811454773,
"learning_rate": 2.7693236714975845e-05,
"loss": 0.7913,
"step": 39600
},
{
"epoch": 2.28,
"grad_norm": 0.9237461686134338,
"learning_rate": 2.7463768115942028e-05,
"loss": 0.7868,
"step": 40000
},
{
"epoch": 2.3,
"grad_norm": 0.9772054553031921,
"learning_rate": 2.723372440763745e-05,
"loss": 0.8001,
"step": 40400
},
{
"epoch": 2.33,
"grad_norm": 0.9553162455558777,
"learning_rate": 2.7003680699332874e-05,
"loss": 0.7898,
"step": 40800
},
{
"epoch": 2.35,
"grad_norm": 0.9960311055183411,
"learning_rate": 2.6773636991028293e-05,
"loss": 0.7935,
"step": 41200
},
{
"epoch": 2.37,
"grad_norm": 1.439426302909851,
"learning_rate": 2.654474350126524e-05,
"loss": 0.7809,
"step": 41600
},
{
"epoch": 2.39,
"grad_norm": 0.9864240288734436,
"learning_rate": 2.6315850011502185e-05,
"loss": 0.7857,
"step": 42000
},
{
"epoch": 2.42,
"grad_norm": 1.095920205116272,
"learning_rate": 2.608580630319761e-05,
"loss": 0.791,
"step": 42400
},
{
"epoch": 2.44,
"grad_norm": 0.9908497929573059,
"learning_rate": 2.5855762594893028e-05,
"loss": 0.7902,
"step": 42800
},
{
"epoch": 2.46,
"grad_norm": 1.1290860176086426,
"learning_rate": 2.562571888658845e-05,
"loss": 0.779,
"step": 43200
},
{
"epoch": 2.48,
"grad_norm": 1.1015818119049072,
"learning_rate": 2.5395675178283874e-05,
"loss": 0.794,
"step": 43600
},
{
"epoch": 2.51,
"grad_norm": 0.9187479615211487,
"learning_rate": 2.5165631469979294e-05,
"loss": 0.791,
"step": 44000
},
{
"epoch": 2.53,
"grad_norm": 1.1698708534240723,
"learning_rate": 2.493558776167472e-05,
"loss": 0.788,
"step": 44400
},
{
"epoch": 2.55,
"grad_norm": 0.9840272068977356,
"learning_rate": 2.4705544053370143e-05,
"loss": 0.7855,
"step": 44800
},
{
"epoch": 2.58,
"grad_norm": 1.0132951736450195,
"learning_rate": 2.4475500345065562e-05,
"loss": 0.7701,
"step": 45200
},
{
"epoch": 2.6,
"grad_norm": 1.0666146278381348,
"learning_rate": 2.4245456636760985e-05,
"loss": 0.7799,
"step": 45600
},
{
"epoch": 2.62,
"grad_norm": 1.20628023147583,
"learning_rate": 2.401541292845641e-05,
"loss": 0.7764,
"step": 46000
},
{
"epoch": 2.64,
"grad_norm": 0.8813405632972717,
"learning_rate": 2.3785369220151828e-05,
"loss": 0.7746,
"step": 46400
},
{
"epoch": 2.67,
"grad_norm": 1.2259961366653442,
"learning_rate": 2.355532551184725e-05,
"loss": 0.7734,
"step": 46800
},
{
"epoch": 2.69,
"grad_norm": 0.9694178700447083,
"learning_rate": 2.3325281803542674e-05,
"loss": 0.7784,
"step": 47200
},
{
"epoch": 2.71,
"grad_norm": 1.1529206037521362,
"learning_rate": 2.3095238095238097e-05,
"loss": 0.7711,
"step": 47600
},
{
"epoch": 2.74,
"grad_norm": 0.9313886165618896,
"learning_rate": 2.286519438693352e-05,
"loss": 0.7797,
"step": 48000
},
{
"epoch": 2.76,
"grad_norm": 1.1616884469985962,
"learning_rate": 2.2635150678628943e-05,
"loss": 0.7694,
"step": 48400
},
{
"epoch": 2.78,
"grad_norm": 1.096562147140503,
"learning_rate": 2.2405106970324362e-05,
"loss": 0.7627,
"step": 48800
},
{
"epoch": 2.8,
"grad_norm": 0.990018904209137,
"learning_rate": 2.2175063262019785e-05,
"loss": 0.7674,
"step": 49200
},
{
"epoch": 2.83,
"grad_norm": 1.0565036535263062,
"learning_rate": 2.194501955371521e-05,
"loss": 0.7606,
"step": 49600
},
{
"epoch": 2.85,
"grad_norm": 1.1336785554885864,
"learning_rate": 2.1714975845410628e-05,
"loss": 0.759,
"step": 50000
},
{
"epoch": 2.87,
"grad_norm": 1.0509406328201294,
"learning_rate": 2.148493213710605e-05,
"loss": 0.777,
"step": 50400
},
{
"epoch": 2.89,
"grad_norm": 1.0363755226135254,
"learning_rate": 2.1254888428801474e-05,
"loss": 0.7692,
"step": 50800
},
{
"epoch": 2.92,
"grad_norm": 1.204566240310669,
"learning_rate": 2.1024844720496894e-05,
"loss": 0.7695,
"step": 51200
},
{
"epoch": 2.94,
"grad_norm": 1.0907776355743408,
"learning_rate": 2.0794801012192317e-05,
"loss": 0.7718,
"step": 51600
},
{
"epoch": 2.96,
"grad_norm": 1.0475547313690186,
"learning_rate": 2.05653324131585e-05,
"loss": 0.7688,
"step": 52000
},
{
"epoch": 2.99,
"grad_norm": 1.2301433086395264,
"learning_rate": 2.0335288704853922e-05,
"loss": 0.7614,
"step": 52400
},
{
"epoch": 3.0,
"eval_loss": 1.177372694015503,
"eval_runtime": 239.9376,
"eval_samples_per_second": 251.211,
"eval_steps_per_second": 3.926,
"step": 52644
},
{
"epoch": 3.01,
"grad_norm": 1.330518364906311,
"learning_rate": 2.0105820105820105e-05,
"loss": 0.7462,
"step": 52800
},
{
"epoch": 3.03,
"grad_norm": 1.0969927310943604,
"learning_rate": 1.9875776397515528e-05,
"loss": 0.7272,
"step": 53200
},
{
"epoch": 3.05,
"grad_norm": 1.0054128170013428,
"learning_rate": 1.964573268921095e-05,
"loss": 0.7164,
"step": 53600
},
{
"epoch": 3.08,
"grad_norm": 0.9748343229293823,
"learning_rate": 1.9416264090177137e-05,
"loss": 0.7222,
"step": 54000
},
{
"epoch": 3.1,
"grad_norm": 0.9346728920936584,
"learning_rate": 1.9186220381872557e-05,
"loss": 0.7262,
"step": 54400
},
{
"epoch": 3.12,
"grad_norm": 1.0906745195388794,
"learning_rate": 1.895617667356798e-05,
"loss": 0.7224,
"step": 54800
},
{
"epoch": 3.15,
"grad_norm": 1.0387004613876343,
"learning_rate": 1.8726132965263403e-05,
"loss": 0.7319,
"step": 55200
},
{
"epoch": 3.17,
"grad_norm": 1.0948784351348877,
"learning_rate": 1.8496089256958822e-05,
"loss": 0.7118,
"step": 55600
},
{
"epoch": 3.19,
"grad_norm": 0.9571365118026733,
"learning_rate": 1.8266045548654245e-05,
"loss": 0.7127,
"step": 56000
},
{
"epoch": 3.21,
"grad_norm": 1.0155813694000244,
"learning_rate": 1.803600184034967e-05,
"loss": 0.7185,
"step": 56400
},
{
"epoch": 3.24,
"grad_norm": 1.0468432903289795,
"learning_rate": 1.780653324131585e-05,
"loss": 0.7223,
"step": 56800
},
{
"epoch": 3.26,
"grad_norm": 1.0302894115447998,
"learning_rate": 1.7576489533011274e-05,
"loss": 0.7179,
"step": 57200
},
{
"epoch": 3.28,
"grad_norm": 1.0335286855697632,
"learning_rate": 1.7346445824706694e-05,
"loss": 0.7144,
"step": 57600
},
{
"epoch": 3.31,
"grad_norm": 1.2124297618865967,
"learning_rate": 1.7116402116402117e-05,
"loss": 0.7152,
"step": 58000
},
{
"epoch": 3.33,
"grad_norm": 1.2023513317108154,
"learning_rate": 1.688635840809754e-05,
"loss": 0.7184,
"step": 58400
},
{
"epoch": 3.35,
"grad_norm": 1.0327694416046143,
"learning_rate": 1.665631469979296e-05,
"loss": 0.7128,
"step": 58800
},
{
"epoch": 3.37,
"grad_norm": 0.9827908873558044,
"learning_rate": 1.6426270991488382e-05,
"loss": 0.7162,
"step": 59200
},
{
"epoch": 3.4,
"grad_norm": 1.189833641052246,
"learning_rate": 1.6196227283183805e-05,
"loss": 0.7235,
"step": 59600
},
{
"epoch": 3.42,
"grad_norm": 1.0521001815795898,
"learning_rate": 1.5966183574879228e-05,
"loss": 0.7045,
"step": 60000
},
{
"epoch": 3.44,
"grad_norm": 1.0198911428451538,
"learning_rate": 1.573613986657465e-05,
"loss": 0.711,
"step": 60400
},
{
"epoch": 3.46,
"grad_norm": 1.1130218505859375,
"learning_rate": 1.5506096158270074e-05,
"loss": 0.7158,
"step": 60800
},
{
"epoch": 3.49,
"grad_norm": 0.8894535303115845,
"learning_rate": 1.5276052449965494e-05,
"loss": 0.7202,
"step": 61200
},
{
"epoch": 3.51,
"grad_norm": 1.1167349815368652,
"learning_rate": 1.5046008741660917e-05,
"loss": 0.7139,
"step": 61600
},
{
"epoch": 3.53,
"grad_norm": 1.2239285707473755,
"learning_rate": 1.4815965033356338e-05,
"loss": 0.7163,
"step": 62000
},
{
"epoch": 3.56,
"grad_norm": 1.1781020164489746,
"learning_rate": 1.458592132505176e-05,
"loss": 0.7193,
"step": 62400
},
{
"epoch": 3.58,
"grad_norm": 1.0834379196166992,
"learning_rate": 1.4356452726017944e-05,
"loss": 0.7147,
"step": 62800
},
{
"epoch": 3.6,
"grad_norm": 0.991364061832428,
"learning_rate": 1.4126409017713365e-05,
"loss": 0.7022,
"step": 63200
},
{
"epoch": 3.62,
"grad_norm": 1.1527454853057861,
"learning_rate": 1.3896365309408788e-05,
"loss": 0.7171,
"step": 63600
},
{
"epoch": 3.65,
"grad_norm": 0.9275304675102234,
"learning_rate": 1.366632160110421e-05,
"loss": 0.7083,
"step": 64000
},
{
"epoch": 3.67,
"grad_norm": 1.2628445625305176,
"learning_rate": 1.3436853002070392e-05,
"loss": 0.7069,
"step": 64400
},
{
"epoch": 3.69,
"grad_norm": 1.100540280342102,
"learning_rate": 1.3206809293765815e-05,
"loss": 0.7062,
"step": 64800
},
{
"epoch": 3.72,
"grad_norm": 1.1310228109359741,
"learning_rate": 1.2976765585461237e-05,
"loss": 0.6933,
"step": 65200
},
{
"epoch": 3.74,
"grad_norm": 1.2022795677185059,
"learning_rate": 1.2746721877156661e-05,
"loss": 0.7096,
"step": 65600
},
{
"epoch": 3.76,
"grad_norm": 2.5451197624206543,
"learning_rate": 1.2516678168852084e-05,
"loss": 0.7087,
"step": 66000
},
{
"epoch": 3.78,
"grad_norm": 1.125082015991211,
"learning_rate": 1.2286634460547504e-05,
"loss": 0.6946,
"step": 66400
},
{
"epoch": 3.81,
"grad_norm": 1.3816386461257935,
"learning_rate": 1.2056590752242927e-05,
"loss": 0.7091,
"step": 66800
},
{
"epoch": 3.83,
"grad_norm": 1.197379231452942,
"learning_rate": 1.182654704393835e-05,
"loss": 0.7195,
"step": 67200
},
{
"epoch": 3.85,
"grad_norm": 1.1002612113952637,
"learning_rate": 1.1597078444904533e-05,
"loss": 0.7021,
"step": 67600
},
{
"epoch": 3.88,
"grad_norm": 1.1073203086853027,
"learning_rate": 1.1367034736599954e-05,
"loss": 0.7033,
"step": 68000
},
{
"epoch": 3.9,
"grad_norm": 1.056327223777771,
"learning_rate": 1.1136991028295377e-05,
"loss": 0.7127,
"step": 68400
},
{
"epoch": 3.92,
"grad_norm": 0.9232028722763062,
"learning_rate": 1.090752242926156e-05,
"loss": 0.7055,
"step": 68800
},
{
"epoch": 3.94,
"grad_norm": 1.2083985805511475,
"learning_rate": 1.0678053830227744e-05,
"loss": 0.702,
"step": 69200
},
{
"epoch": 3.97,
"grad_norm": 1.1936637163162231,
"learning_rate": 1.0448010121923165e-05,
"loss": 0.6961,
"step": 69600
},
{
"epoch": 3.99,
"grad_norm": 1.1006180047988892,
"learning_rate": 1.0217966413618588e-05,
"loss": 0.6965,
"step": 70000
},
{
"epoch": 4.0,
"eval_loss": 1.182397723197937,
"eval_runtime": 239.7583,
"eval_samples_per_second": 251.399,
"eval_steps_per_second": 3.929,
"step": 70192
},
{
"epoch": 4.01,
"grad_norm": 0.9501661658287048,
"learning_rate": 9.988497814584771e-06,
"loss": 0.681,
"step": 70400
},
{
"epoch": 4.03,
"grad_norm": 1.1048214435577393,
"learning_rate": 9.759029215550954e-06,
"loss": 0.6718,
"step": 70800
},
{
"epoch": 4.06,
"grad_norm": 1.1402852535247803,
"learning_rate": 9.528985507246377e-06,
"loss": 0.6646,
"step": 71200
},
{
"epoch": 4.08,
"grad_norm": 1.1181689500808716,
"learning_rate": 9.298941798941798e-06,
"loss": 0.6803,
"step": 71600
},
{
"epoch": 4.1,
"grad_norm": 1.0445771217346191,
"learning_rate": 9.068898090637221e-06,
"loss": 0.6681,
"step": 72000
},
{
"epoch": 4.13,
"grad_norm": 1.2578911781311035,
"learning_rate": 8.838854382332644e-06,
"loss": 0.6743,
"step": 72400
},
{
"epoch": 4.15,
"grad_norm": 1.0545594692230225,
"learning_rate": 8.608810674028066e-06,
"loss": 0.6721,
"step": 72800
},
{
"epoch": 4.17,
"grad_norm": 1.0575259923934937,
"learning_rate": 8.378766965723487e-06,
"loss": 0.6692,
"step": 73200
},
{
"epoch": 4.19,
"grad_norm": 1.0617632865905762,
"learning_rate": 8.14872325741891e-06,
"loss": 0.6634,
"step": 73600
},
{
"epoch": 4.22,
"grad_norm": 1.1915861368179321,
"learning_rate": 7.918679549114331e-06,
"loss": 0.6712,
"step": 74000
},
{
"epoch": 4.24,
"grad_norm": 1.16647207736969,
"learning_rate": 7.688635840809754e-06,
"loss": 0.6663,
"step": 74400
},
{
"epoch": 4.26,
"grad_norm": 1.0123538970947266,
"learning_rate": 7.458592132505176e-06,
"loss": 0.6785,
"step": 74800
},
{
"epoch": 4.29,
"grad_norm": 1.0426361560821533,
"learning_rate": 7.228548424200598e-06,
"loss": 0.6825,
"step": 75200
},
{
"epoch": 4.31,
"grad_norm": 1.0516417026519775,
"learning_rate": 6.9985047158960205e-06,
"loss": 0.6662,
"step": 75600
},
{
"epoch": 4.33,
"grad_norm": 1.0297150611877441,
"learning_rate": 6.768461007591443e-06,
"loss": 0.667,
"step": 76000
},
{
"epoch": 4.35,
"grad_norm": 1.1151374578475952,
"learning_rate": 6.538417299286864e-06,
"loss": 0.6612,
"step": 76400
},
{
"epoch": 4.38,
"grad_norm": 1.100305438041687,
"learning_rate": 6.308373590982288e-06,
"loss": 0.6731,
"step": 76800
},
{
"epoch": 4.4,
"grad_norm": 1.0381314754486084,
"learning_rate": 6.078904991948471e-06,
"loss": 0.6816,
"step": 77200
},
{
"epoch": 4.42,
"grad_norm": 1.166762351989746,
"learning_rate": 5.848861283643893e-06,
"loss": 0.6674,
"step": 77600
},
{
"epoch": 4.44,
"grad_norm": 1.1503565311431885,
"learning_rate": 5.619392684610076e-06,
"loss": 0.6711,
"step": 78000
},
{
"epoch": 4.47,
"grad_norm": 1.2317191362380981,
"learning_rate": 5.3893489763054985e-06,
"loss": 0.6754,
"step": 78400
},
{
"epoch": 4.49,
"grad_norm": 1.1069433689117432,
"learning_rate": 5.159305268000921e-06,
"loss": 0.6657,
"step": 78800
},
{
"epoch": 4.51,
"grad_norm": 1.081761121749878,
"learning_rate": 4.929261559696342e-06,
"loss": 0.6632,
"step": 79200
},
{
"epoch": 4.54,
"grad_norm": 1.0425435304641724,
"learning_rate": 4.699217851391765e-06,
"loss": 0.6607,
"step": 79600
},
{
"epoch": 4.56,
"grad_norm": 1.1054120063781738,
"learning_rate": 4.469174143087187e-06,
"loss": 0.6747,
"step": 80000
},
{
"epoch": 4.58,
"grad_norm": 1.1128557920455933,
"learning_rate": 4.239130434782608e-06,
"loss": 0.6686,
"step": 80400
},
{
"epoch": 4.6,
"grad_norm": 1.2730941772460938,
"learning_rate": 4.009086726478031e-06,
"loss": 0.6733,
"step": 80800
},
{
"epoch": 4.63,
"grad_norm": 1.291892647743225,
"learning_rate": 3.779043018173453e-06,
"loss": 0.6631,
"step": 81200
},
{
"epoch": 4.65,
"grad_norm": 0.9876201152801514,
"learning_rate": 3.548999309868875e-06,
"loss": 0.6691,
"step": 81600
},
{
"epoch": 4.67,
"grad_norm": 1.2430497407913208,
"learning_rate": 3.3189556015642977e-06,
"loss": 0.6579,
"step": 82000
},
{
"epoch": 4.7,
"grad_norm": 1.012856364250183,
"learning_rate": 3.0889118932597194e-06,
"loss": 0.6637,
"step": 82400
},
{
"epoch": 4.72,
"grad_norm": 1.1547410488128662,
"learning_rate": 2.8588681849551415e-06,
"loss": 0.6801,
"step": 82800
},
{
"epoch": 4.74,
"grad_norm": 1.1335712671279907,
"learning_rate": 2.6288244766505636e-06,
"loss": 0.6717,
"step": 83200
},
{
"epoch": 4.76,
"grad_norm": 1.0243676900863647,
"learning_rate": 2.3987807683459858e-06,
"loss": 0.6639,
"step": 83600
},
{
"epoch": 4.79,
"grad_norm": 1.0435352325439453,
"learning_rate": 2.1693121693121695e-06,
"loss": 0.677,
"step": 84000
},
{
"epoch": 4.81,
"grad_norm": 1.1295571327209473,
"learning_rate": 1.9398435702783527e-06,
"loss": 0.6614,
"step": 84400
},
{
"epoch": 4.83,
"grad_norm": 1.0412089824676514,
"learning_rate": 1.7103749712445366e-06,
"loss": 0.6613,
"step": 84800
},
{
"epoch": 4.86,
"grad_norm": 1.0020956993103027,
"learning_rate": 1.4803312629399585e-06,
"loss": 0.6729,
"step": 85200
},
{
"epoch": 4.88,
"grad_norm": 1.2422608137130737,
"learning_rate": 1.2502875546353809e-06,
"loss": 0.6599,
"step": 85600
},
{
"epoch": 4.9,
"grad_norm": 1.1637760400772095,
"learning_rate": 1.0202438463308028e-06,
"loss": 0.664,
"step": 86000
},
{
"epoch": 4.92,
"grad_norm": 1.0108635425567627,
"learning_rate": 7.902001380262249e-07,
"loss": 0.6627,
"step": 86400
},
{
"epoch": 4.95,
"grad_norm": 1.2056249380111694,
"learning_rate": 5.601564297216472e-07,
"loss": 0.67,
"step": 86800
},
{
"epoch": 4.97,
"grad_norm": 3.1152091026306152,
"learning_rate": 3.301127214170693e-07,
"loss": 0.6619,
"step": 87200
},
{
"epoch": 4.99,
"grad_norm": 1.2400481700897217,
"learning_rate": 1.0006901311249138e-07,
"loss": 0.6636,
"step": 87600
},
{
"epoch": 5.0,
"eval_loss": 1.1956850290298462,
"eval_runtime": 239.8036,
"eval_samples_per_second": 251.351,
"eval_steps_per_second": 3.928,
"step": 87740
},
{
"epoch": 5.0,
"step": 87740,
"total_flos": 1.7097588901675008e+18,
"train_loss": 0.8546056313098415,
"train_runtime": 31454.7003,
"train_samples_per_second": 89.265,
"train_steps_per_second": 2.789
}
],
"logging_steps": 400,
"max_steps": 87740,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 1.7097588901675008e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
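
A minimal sketch (not part of the original file) of how this log might be consumed: the structure above matches the Hugging Face Transformers trainer_state.json layout, where `log_history` mixes per-step training entries (with a `loss` key) and per-epoch evaluation entries (with an `eval_loss` key). The filename `trainer_state.json` below is an assumption about where the JSON is saved locally.

```python
import json

# Assumption: the JSON above has been saved locally as "trainer_state.json".
with open("trainer_state.json") as f:
    state = json.load(f)

# Split the log into training-loss entries and per-epoch eval entries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Best checkpoint as recorded by the Trainer (lowest eval_loss, here at epoch 3).
print("best eval_loss:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Per-epoch validation loss: 1.2029 -> 1.1818 -> 1.1774 -> 1.1824 -> 1.1957.
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f} (step {e['step']})")
```

Reading the eval entries this way makes the early-stopping picture visible: validation loss bottoms out at epoch 3 (step 52644) and drifts upward over epochs 4 and 5, which is why `best_model_checkpoint` points at checkpoint-52644 rather than the final step.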