{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.99968,
"eval_steps": 500,
"global_step": 1562,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0064,
"grad_norm": 264.14157105208596,
"learning_rate": 1.9957319675629537e-05,
"loss": 6.6226,
"step": 10
},
{
"epoch": 0.0128,
"grad_norm": 0.9501767638724645,
"learning_rate": 1.9914639351259072e-05,
"loss": 1.0902,
"step": 20
},
{
"epoch": 0.0192,
"grad_norm": 65.28805841366503,
"learning_rate": 1.9871959026888607e-05,
"loss": 0.9029,
"step": 30
},
{
"epoch": 0.0256,
"grad_norm": 34.305538715375285,
"learning_rate": 1.982927870251814e-05,
"loss": 0.7423,
"step": 40
},
{
"epoch": 0.032,
"grad_norm": 38.727668273486,
"learning_rate": 1.9786598378147674e-05,
"loss": 0.7046,
"step": 50
},
{
"epoch": 0.0384,
"grad_norm": 36.57196447219297,
"learning_rate": 1.9743918053777212e-05,
"loss": 0.7285,
"step": 60
},
{
"epoch": 0.0448,
"grad_norm": 53.27594809439924,
"learning_rate": 1.9701237729406747e-05,
"loss": 0.7871,
"step": 70
},
{
"epoch": 0.0512,
"grad_norm": 88.03207183558006,
"learning_rate": 1.965855740503628e-05,
"loss": 0.769,
"step": 80
},
{
"epoch": 0.0576,
"grad_norm": 51.43781215183131,
"learning_rate": 1.9615877080665814e-05,
"loss": 0.7362,
"step": 90
},
{
"epoch": 0.064,
"grad_norm": 38.96905697564119,
"learning_rate": 1.957319675629535e-05,
"loss": 0.7233,
"step": 100
},
{
"epoch": 0.0704,
"grad_norm": 22.844565197931338,
"learning_rate": 1.9530516431924884e-05,
"loss": 0.7312,
"step": 110
},
{
"epoch": 0.0768,
"grad_norm": 5.837139952851078,
"learning_rate": 1.948783610755442e-05,
"loss": 0.6961,
"step": 120
},
{
"epoch": 0.0832,
"grad_norm": 15.554948936391037,
"learning_rate": 1.9445155783183954e-05,
"loss": 0.6942,
"step": 130
},
{
"epoch": 0.0896,
"grad_norm": 1.046867037922016,
"learning_rate": 1.940247545881349e-05,
"loss": 0.7692,
"step": 140
},
{
"epoch": 0.096,
"grad_norm": 56.046251029505235,
"learning_rate": 1.935979513444302e-05,
"loss": 0.7653,
"step": 150
},
{
"epoch": 0.1024,
"grad_norm": 44.97424238614769,
"learning_rate": 1.931711481007256e-05,
"loss": 0.7546,
"step": 160
},
{
"epoch": 0.1088,
"grad_norm": 26.58391385742325,
"learning_rate": 1.9274434485702095e-05,
"loss": 0.7372,
"step": 170
},
{
"epoch": 0.1152,
"grad_norm": 28.309929026079814,
"learning_rate": 1.923175416133163e-05,
"loss": 0.7312,
"step": 180
},
{
"epoch": 0.1216,
"grad_norm": 9.807211762945009,
"learning_rate": 1.918907383696116e-05,
"loss": 0.7098,
"step": 190
},
{
"epoch": 0.128,
"grad_norm": 19.15421302054025,
"learning_rate": 1.9146393512590697e-05,
"loss": 0.7016,
"step": 200
},
{
"epoch": 0.1344,
"grad_norm": 12.008823873572915,
"learning_rate": 1.910371318822023e-05,
"loss": 0.7025,
"step": 210
},
{
"epoch": 0.1408,
"grad_norm": 42.57181850852334,
"learning_rate": 1.9061032863849767e-05,
"loss": 0.7438,
"step": 220
},
{
"epoch": 0.1472,
"grad_norm": 15.052154601632347,
"learning_rate": 1.9018352539479302e-05,
"loss": 0.7295,
"step": 230
},
{
"epoch": 0.1536,
"grad_norm": 42.381917976054716,
"learning_rate": 1.8975672215108837e-05,
"loss": 0.6958,
"step": 240
},
{
"epoch": 0.16,
"grad_norm": 20.771285759395806,
"learning_rate": 1.893299189073837e-05,
"loss": 0.6944,
"step": 250
},
{
"epoch": 0.1664,
"grad_norm": 4.394595608571032,
"learning_rate": 1.8890311566367904e-05,
"loss": 0.7176,
"step": 260
},
{
"epoch": 0.1728,
"grad_norm": 249.89136352365057,
"learning_rate": 1.8847631241997442e-05,
"loss": 1.0083,
"step": 270
},
{
"epoch": 0.1792,
"grad_norm": 5.996387332631578,
"learning_rate": 1.8804950917626977e-05,
"loss": 0.6968,
"step": 280
},
{
"epoch": 0.1856,
"grad_norm": 19.102807026081695,
"learning_rate": 1.8762270593256512e-05,
"loss": 0.7182,
"step": 290
},
{
"epoch": 0.192,
"grad_norm": 48.715309036127806,
"learning_rate": 1.8719590268886044e-05,
"loss": 0.7188,
"step": 300
},
{
"epoch": 0.1984,
"grad_norm": 16.250291382784976,
"learning_rate": 1.867690994451558e-05,
"loss": 0.732,
"step": 310
},
{
"epoch": 0.2048,
"grad_norm": 1.1019470342998559,
"learning_rate": 1.8634229620145114e-05,
"loss": 0.7114,
"step": 320
},
{
"epoch": 0.2112,
"grad_norm": 25.505165013331446,
"learning_rate": 1.859154929577465e-05,
"loss": 0.6976,
"step": 330
},
{
"epoch": 0.2176,
"grad_norm": 25.52219679042433,
"learning_rate": 1.8548868971404184e-05,
"loss": 0.7012,
"step": 340
},
{
"epoch": 0.224,
"grad_norm": 38.23360683450805,
"learning_rate": 1.850618864703372e-05,
"loss": 0.7012,
"step": 350
},
{
"epoch": 0.2304,
"grad_norm": 45.80967639245008,
"learning_rate": 1.846350832266325e-05,
"loss": 0.7217,
"step": 360
},
{
"epoch": 0.2368,
"grad_norm": 3.2736203166626066,
"learning_rate": 1.842082799829279e-05,
"loss": 0.7068,
"step": 370
},
{
"epoch": 0.2432,
"grad_norm": 44.2443823368047,
"learning_rate": 1.8378147673922325e-05,
"loss": 0.7054,
"step": 380
},
{
"epoch": 0.2496,
"grad_norm": 24.479641918561533,
"learning_rate": 1.833546734955186e-05,
"loss": 0.7063,
"step": 390
},
{
"epoch": 0.256,
"grad_norm": 14.48013040788769,
"learning_rate": 1.8292787025181395e-05,
"loss": 0.7189,
"step": 400
},
{
"epoch": 0.2624,
"grad_norm": 27.59402004739812,
"learning_rate": 1.8250106700810927e-05,
"loss": 0.7381,
"step": 410
},
{
"epoch": 0.2688,
"grad_norm": 20.991133253614027,
"learning_rate": 1.8207426376440462e-05,
"loss": 0.6987,
"step": 420
},
{
"epoch": 0.2752,
"grad_norm": 55.45512096448618,
"learning_rate": 1.8164746052069997e-05,
"loss": 0.698,
"step": 430
},
{
"epoch": 0.2816,
"grad_norm": 12.281688748811272,
"learning_rate": 1.8122065727699532e-05,
"loss": 0.6969,
"step": 440
},
{
"epoch": 0.288,
"grad_norm": 16.799892965286315,
"learning_rate": 1.8079385403329067e-05,
"loss": 0.6937,
"step": 450
},
{
"epoch": 0.2944,
"grad_norm": 40.926146677115995,
"learning_rate": 1.8036705078958602e-05,
"loss": 0.7103,
"step": 460
},
{
"epoch": 0.3008,
"grad_norm": 1.2306810165532065,
"learning_rate": 1.7994024754588134e-05,
"loss": 0.7035,
"step": 470
},
{
"epoch": 0.3072,
"grad_norm": 16.296397242310103,
"learning_rate": 1.7951344430217672e-05,
"loss": 0.6957,
"step": 480
},
{
"epoch": 0.3136,
"grad_norm": 58.82531456657698,
"learning_rate": 1.7908664105847207e-05,
"loss": 0.7044,
"step": 490
},
{
"epoch": 0.32,
"grad_norm": 46.584410894628604,
"learning_rate": 1.7865983781476742e-05,
"loss": 0.7036,
"step": 500
},
{
"epoch": 0.3264,
"grad_norm": 70.47645786626141,
"learning_rate": 1.7823303457106274e-05,
"loss": 0.7297,
"step": 510
},
{
"epoch": 0.3328,
"grad_norm": 21.143210263370694,
"learning_rate": 1.778062313273581e-05,
"loss": 0.7746,
"step": 520
},
{
"epoch": 0.3392,
"grad_norm": 57.60458892440397,
"learning_rate": 1.7737942808365344e-05,
"loss": 0.7103,
"step": 530
},
{
"epoch": 0.3456,
"grad_norm": 1.1421527686881012,
"learning_rate": 1.769526248399488e-05,
"loss": 0.711,
"step": 540
},
{
"epoch": 0.352,
"grad_norm": 44.938444716149746,
"learning_rate": 1.7652582159624414e-05,
"loss": 0.702,
"step": 550
},
{
"epoch": 0.3584,
"grad_norm": 46.01945874630251,
"learning_rate": 1.760990183525395e-05,
"loss": 0.7603,
"step": 560
},
{
"epoch": 0.3648,
"grad_norm": 61.75493838986847,
"learning_rate": 1.7567221510883485e-05,
"loss": 0.7407,
"step": 570
},
{
"epoch": 0.3712,
"grad_norm": 53.20809128552049,
"learning_rate": 1.752454118651302e-05,
"loss": 0.7264,
"step": 580
},
{
"epoch": 0.3776,
"grad_norm": 4.061924742090285,
"learning_rate": 1.7481860862142555e-05,
"loss": 0.699,
"step": 590
},
{
"epoch": 0.384,
"grad_norm": 41.022631989022365,
"learning_rate": 1.743918053777209e-05,
"loss": 0.7228,
"step": 600
},
{
"epoch": 0.3904,
"grad_norm": 11.308284278110525,
"learning_rate": 1.7396500213401625e-05,
"loss": 0.7007,
"step": 610
},
{
"epoch": 0.3968,
"grad_norm": 30.45449828508719,
"learning_rate": 1.7353819889031157e-05,
"loss": 0.7112,
"step": 620
},
{
"epoch": 0.4032,
"grad_norm": 9.017073598179262,
"learning_rate": 1.7311139564660692e-05,
"loss": 0.6961,
"step": 630
},
{
"epoch": 0.4096,
"grad_norm": 4.579916728672278,
"learning_rate": 1.7268459240290227e-05,
"loss": 0.6941,
"step": 640
},
{
"epoch": 0.416,
"grad_norm": 29.95709147454572,
"learning_rate": 1.7225778915919762e-05,
"loss": 0.6933,
"step": 650
},
{
"epoch": 0.4224,
"grad_norm": 37.94330872039394,
"learning_rate": 1.7183098591549297e-05,
"loss": 0.7104,
"step": 660
},
{
"epoch": 0.4288,
"grad_norm": 9.085675067352579,
"learning_rate": 1.7140418267178832e-05,
"loss": 0.711,
"step": 670
},
{
"epoch": 0.4352,
"grad_norm": 4.5685693320566845,
"learning_rate": 1.7097737942808367e-05,
"loss": 0.7053,
"step": 680
},
{
"epoch": 0.4416,
"grad_norm": 9.331385464530229,
"learning_rate": 1.7055057618437902e-05,
"loss": 0.6937,
"step": 690
},
{
"epoch": 0.448,
"grad_norm": 4.706391444594712,
"learning_rate": 1.7012377294067437e-05,
"loss": 0.6947,
"step": 700
},
{
"epoch": 0.4544,
"grad_norm": 77.0732426593841,
"learning_rate": 1.6969696969696972e-05,
"loss": 0.7077,
"step": 710
},
{
"epoch": 0.4608,
"grad_norm": 29.754416167509486,
"learning_rate": 1.6927016645326508e-05,
"loss": 0.7118,
"step": 720
},
{
"epoch": 0.4672,
"grad_norm": 65.82557320031596,
"learning_rate": 1.688433632095604e-05,
"loss": 0.7262,
"step": 730
},
{
"epoch": 0.4736,
"grad_norm": 43.16254755272887,
"learning_rate": 1.6841655996585574e-05,
"loss": 0.794,
"step": 740
},
{
"epoch": 0.48,
"grad_norm": 33.90296365072727,
"learning_rate": 1.679897567221511e-05,
"loss": 0.7004,
"step": 750
},
{
"epoch": 0.4864,
"grad_norm": 30.14465166363261,
"learning_rate": 1.6756295347844644e-05,
"loss": 0.696,
"step": 760
},
{
"epoch": 0.4928,
"grad_norm": 9.065074857179535,
"learning_rate": 1.671361502347418e-05,
"loss": 0.6937,
"step": 770
},
{
"epoch": 0.4992,
"grad_norm": 0.3152892417962158,
"learning_rate": 1.6670934699103715e-05,
"loss": 0.6961,
"step": 780
},
{
"epoch": 0.5056,
"grad_norm": 13.642994413090179,
"learning_rate": 1.662825437473325e-05,
"loss": 0.7001,
"step": 790
},
{
"epoch": 0.512,
"grad_norm": 13.842601794795,
"learning_rate": 1.6585574050362785e-05,
"loss": 0.6964,
"step": 800
},
{
"epoch": 0.5184,
"grad_norm": 47.453063920474996,
"learning_rate": 1.654289372599232e-05,
"loss": 0.7064,
"step": 810
},
{
"epoch": 0.5248,
"grad_norm": 14.240385288413568,
"learning_rate": 1.6500213401621855e-05,
"loss": 0.6975,
"step": 820
},
{
"epoch": 0.5312,
"grad_norm": 24.14687264769156,
"learning_rate": 1.645753307725139e-05,
"loss": 0.6935,
"step": 830
},
{
"epoch": 0.5376,
"grad_norm": 19.2138824233043,
"learning_rate": 1.6414852752880922e-05,
"loss": 0.7202,
"step": 840
},
{
"epoch": 0.544,
"grad_norm": 56.99219369895073,
"learning_rate": 1.6372172428510457e-05,
"loss": 0.7173,
"step": 850
},
{
"epoch": 0.5504,
"grad_norm": 19.56797572499434,
"learning_rate": 1.6329492104139992e-05,
"loss": 0.7115,
"step": 860
},
{
"epoch": 0.5568,
"grad_norm": 4.792731482537688,
"learning_rate": 1.6286811779769527e-05,
"loss": 0.6995,
"step": 870
},
{
"epoch": 0.5632,
"grad_norm": 9.561587575385854,
"learning_rate": 1.6244131455399062e-05,
"loss": 0.6929,
"step": 880
},
{
"epoch": 0.5696,
"grad_norm": 9.602797799334187,
"learning_rate": 1.6201451131028597e-05,
"loss": 0.6915,
"step": 890
},
{
"epoch": 0.576,
"grad_norm": 50.782645803497864,
"learning_rate": 1.6158770806658132e-05,
"loss": 0.7257,
"step": 900
},
{
"epoch": 0.5824,
"grad_norm": 38.26092038506421,
"learning_rate": 1.6116090482287667e-05,
"loss": 0.8023,
"step": 910
},
{
"epoch": 0.5888,
"grad_norm": 22.374195940173102,
"learning_rate": 1.6073410157917202e-05,
"loss": 0.7071,
"step": 920
},
{
"epoch": 0.5952,
"grad_norm": 0.40364266839819835,
"learning_rate": 1.6030729833546738e-05,
"loss": 0.702,
"step": 930
},
{
"epoch": 0.6016,
"grad_norm": 23.542668702826873,
"learning_rate": 1.5988049509176273e-05,
"loss": 0.6971,
"step": 940
},
{
"epoch": 0.608,
"grad_norm": 9.318677225633424,
"learning_rate": 1.5945369184805804e-05,
"loss": 0.7137,
"step": 950
},
{
"epoch": 0.6144,
"grad_norm": 7.1239062789751255,
"learning_rate": 1.590268886043534e-05,
"loss": 0.6962,
"step": 960
},
{
"epoch": 0.6208,
"grad_norm": 41.611496617926214,
"learning_rate": 1.5860008536064874e-05,
"loss": 0.7045,
"step": 970
},
{
"epoch": 0.6272,
"grad_norm": 7.664807068778342,
"learning_rate": 1.581732821169441e-05,
"loss": 0.7106,
"step": 980
},
{
"epoch": 0.6336,
"grad_norm": 57.77894192842512,
"learning_rate": 1.5774647887323945e-05,
"loss": 0.7332,
"step": 990
},
{
"epoch": 0.64,
"grad_norm": 7.447432454549683,
"learning_rate": 1.573196756295348e-05,
"loss": 0.6979,
"step": 1000
},
{
"epoch": 0.6464,
"grad_norm": 40.606430931214454,
"learning_rate": 1.5689287238583015e-05,
"loss": 0.7024,
"step": 1010
},
{
"epoch": 0.6528,
"grad_norm": 22.264418589209143,
"learning_rate": 1.564660691421255e-05,
"loss": 0.688,
"step": 1020
},
{
"epoch": 0.6592,
"grad_norm": 45.36453945853153,
"learning_rate": 1.5603926589842085e-05,
"loss": 0.7025,
"step": 1030
},
{
"epoch": 0.6656,
"grad_norm": 35.896572490746344,
"learning_rate": 1.556124626547162e-05,
"loss": 0.699,
"step": 1040
},
{
"epoch": 0.672,
"grad_norm": 25.283604621062047,
"learning_rate": 1.5518565941101155e-05,
"loss": 0.6992,
"step": 1050
},
{
"epoch": 0.6784,
"grad_norm": 88.95336614569354,
"learning_rate": 1.5475885616730687e-05,
"loss": 0.726,
"step": 1060
},
{
"epoch": 0.6848,
"grad_norm": 12.303576001457213,
"learning_rate": 1.5433205292360222e-05,
"loss": 0.7156,
"step": 1070
},
{
"epoch": 0.6912,
"grad_norm": 2.543949930035147,
"learning_rate": 1.5390524967989757e-05,
"loss": 0.6923,
"step": 1080
},
{
"epoch": 0.6976,
"grad_norm": 15.168062777033827,
"learning_rate": 1.5347844643619292e-05,
"loss": 0.7014,
"step": 1090
},
{
"epoch": 0.704,
"grad_norm": 42.69108600760037,
"learning_rate": 1.5305164319248827e-05,
"loss": 0.698,
"step": 1100
},
{
"epoch": 0.7104,
"grad_norm": 24.972508867782874,
"learning_rate": 1.5262483994878362e-05,
"loss": 0.6991,
"step": 1110
},
{
"epoch": 0.7168,
"grad_norm": 2.4932061109832357,
"learning_rate": 1.5219803670507897e-05,
"loss": 0.7331,
"step": 1120
},
{
"epoch": 0.7232,
"grad_norm": 12.62219152048133,
"learning_rate": 1.5177123346137432e-05,
"loss": 0.7064,
"step": 1130
},
{
"epoch": 0.7296,
"grad_norm": 38.458649768848474,
"learning_rate": 1.5134443021766968e-05,
"loss": 0.7015,
"step": 1140
},
{
"epoch": 0.736,
"grad_norm": 43.74951168967799,
"learning_rate": 1.5091762697396501e-05,
"loss": 0.7,
"step": 1150
},
{
"epoch": 0.7424,
"grad_norm": 15.194897767523432,
"learning_rate": 1.5049082373026036e-05,
"loss": 0.7132,
"step": 1160
},
{
"epoch": 0.7488,
"grad_norm": 78.5438809363444,
"learning_rate": 1.5006402048655571e-05,
"loss": 0.713,
"step": 1170
},
{
"epoch": 0.7552,
"grad_norm": 22.73674000916242,
"learning_rate": 1.4963721724285105e-05,
"loss": 0.6987,
"step": 1180
},
{
"epoch": 0.7616,
"grad_norm": 25.070819559325827,
"learning_rate": 1.492104139991464e-05,
"loss": 0.6985,
"step": 1190
},
{
"epoch": 0.768,
"grad_norm": 32.548630882591176,
"learning_rate": 1.4878361075544175e-05,
"loss": 0.6985,
"step": 1200
},
{
"epoch": 0.7744,
"grad_norm": 17.57196446777494,
"learning_rate": 1.4835680751173711e-05,
"loss": 0.6943,
"step": 1210
},
{
"epoch": 0.7808,
"grad_norm": 29.055248184445425,
"learning_rate": 1.4793000426803245e-05,
"loss": 0.6975,
"step": 1220
},
{
"epoch": 0.7872,
"grad_norm": 24.3985412253173,
"learning_rate": 1.475032010243278e-05,
"loss": 0.7007,
"step": 1230
},
{
"epoch": 0.7936,
"grad_norm": 2.4446460662929668,
"learning_rate": 1.4707639778062315e-05,
"loss": 0.7073,
"step": 1240
},
{
"epoch": 0.8,
"grad_norm": 2.491850857326724,
"learning_rate": 1.466495945369185e-05,
"loss": 0.6941,
"step": 1250
},
{
"epoch": 0.8064,
"grad_norm": 27.558412059329246,
"learning_rate": 1.4622279129321384e-05,
"loss": 0.7,
"step": 1260
},
{
"epoch": 0.8128,
"grad_norm": 25.007444288741887,
"learning_rate": 1.4579598804950919e-05,
"loss": 0.6955,
"step": 1270
},
{
"epoch": 0.8192,
"grad_norm": 27.307466640271596,
"learning_rate": 1.4536918480580454e-05,
"loss": 0.7095,
"step": 1280
},
{
"epoch": 0.8256,
"grad_norm": 38.01718334191501,
"learning_rate": 1.4494238156209987e-05,
"loss": 0.702,
"step": 1290
},
{
"epoch": 0.832,
"grad_norm": 61.76904611652498,
"learning_rate": 1.4451557831839522e-05,
"loss": 0.7009,
"step": 1300
},
{
"epoch": 0.8384,
"grad_norm": 10.203937504750813,
"learning_rate": 1.4408877507469059e-05,
"loss": 0.7007,
"step": 1310
},
{
"epoch": 0.8448,
"grad_norm": 2.56704746207845,
"learning_rate": 1.4366197183098594e-05,
"loss": 0.6919,
"step": 1320
},
{
"epoch": 0.8512,
"grad_norm": 29.18243854368692,
"learning_rate": 1.4323516858728127e-05,
"loss": 0.7046,
"step": 1330
},
{
"epoch": 0.8576,
"grad_norm": 2.482434788851533,
"learning_rate": 1.4280836534357663e-05,
"loss": 0.6976,
"step": 1340
},
{
"epoch": 0.864,
"grad_norm": 25.193988543576637,
"learning_rate": 1.4238156209987198e-05,
"loss": 0.6971,
"step": 1350
},
{
"epoch": 0.8704,
"grad_norm": 15.613363287685178,
"learning_rate": 1.4195475885616733e-05,
"loss": 0.6981,
"step": 1360
},
{
"epoch": 0.8768,
"grad_norm": 46.55913752357568,
"learning_rate": 1.4152795561246266e-05,
"loss": 0.7012,
"step": 1370
},
{
"epoch": 0.8832,
"grad_norm": 2.513971866784115,
"learning_rate": 1.4110115236875801e-05,
"loss": 0.7114,
"step": 1380
},
{
"epoch": 0.8896,
"grad_norm": 2.533912261677584,
"learning_rate": 1.4067434912505336e-05,
"loss": 0.6921,
"step": 1390
},
{
"epoch": 0.896,
"grad_norm": 15.433305819732965,
"learning_rate": 1.402475458813487e-05,
"loss": 0.7028,
"step": 1400
},
{
"epoch": 0.9024,
"grad_norm": 25.989286713479526,
"learning_rate": 1.3982074263764405e-05,
"loss": 0.6999,
"step": 1410
},
{
"epoch": 0.9088,
"grad_norm": 5.085959808829346,
"learning_rate": 1.3939393939393942e-05,
"loss": 0.699,
"step": 1420
},
{
"epoch": 0.9152,
"grad_norm": 33.19152083521873,
"learning_rate": 1.3896713615023477e-05,
"loss": 0.697,
"step": 1430
},
{
"epoch": 0.9216,
"grad_norm": 27.992023707013434,
"learning_rate": 1.385403329065301e-05,
"loss": 0.6958,
"step": 1440
},
{
"epoch": 0.928,
"grad_norm": 48.60020679215588,
"learning_rate": 1.3811352966282545e-05,
"loss": 0.7087,
"step": 1450
},
{
"epoch": 0.9344,
"grad_norm": 33.13799019239982,
"learning_rate": 1.376867264191208e-05,
"loss": 0.7087,
"step": 1460
},
{
"epoch": 0.9408,
"grad_norm": 67.15874404247629,
"learning_rate": 1.3725992317541614e-05,
"loss": 0.7119,
"step": 1470
},
{
"epoch": 0.9472,
"grad_norm": 71.5479326072592,
"learning_rate": 1.3683311993171149e-05,
"loss": 0.7208,
"step": 1480
},
{
"epoch": 0.9536,
"grad_norm": 27.89037537127557,
"learning_rate": 1.3640631668800684e-05,
"loss": 0.7059,
"step": 1490
},
{
"epoch": 0.96,
"grad_norm": 58.44481380986598,
"learning_rate": 1.3597951344430219e-05,
"loss": 0.7245,
"step": 1500
},
{
"epoch": 0.9664,
"grad_norm": 50.08324583292298,
"learning_rate": 1.3555271020059752e-05,
"loss": 0.7024,
"step": 1510
},
{
"epoch": 0.9728,
"grad_norm": 67.43830099761054,
"learning_rate": 1.3512590695689289e-05,
"loss": 0.7142,
"step": 1520
},
{
"epoch": 0.9792,
"grad_norm": 7.703745975649705,
"learning_rate": 1.3469910371318824e-05,
"loss": 0.727,
"step": 1530
},
{
"epoch": 0.9856,
"grad_norm": 2.6101317494535197,
"learning_rate": 1.342723004694836e-05,
"loss": 0.6886,
"step": 1540
},
{
"epoch": 0.992,
"grad_norm": 2.5758180591205395,
"learning_rate": 1.3384549722577893e-05,
"loss": 0.7053,
"step": 1550
},
{
"epoch": 0.9984,
"grad_norm": 14.198716739942608,
"learning_rate": 1.3341869398207428e-05,
"loss": 0.7095,
"step": 1560
},
{
"epoch": 0.99968,
"eval_accuracy": 0.51,
"eval_loss": 0.6943749785423279,
"eval_runtime": 6.1368,
"eval_samples_per_second": 32.59,
"eval_steps_per_second": 4.074,
"step": 1562
}
],
"logging_steps": 10,
"max_steps": 4686,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 84298765959168.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}