{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 812,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.22422564560903913,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 0.8165,
      "step": 1
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.24393612584966326,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.8171,
      "step": 5
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.32976460652590056,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.8001,
      "step": 10
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.22143488102760825,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 0.7749,
      "step": 15
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.27241208120124494,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.7399,
      "step": 20
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.22476497302964527,
      "learning_rate": 6.097560975609756e-05,
      "loss": 0.765,
      "step": 25
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.23267051655279075,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.7434,
      "step": 30
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.22352973519663066,
      "learning_rate": 8.53658536585366e-05,
      "loss": 0.7884,
      "step": 35
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.23366158183718752,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.767,
      "step": 40
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.21853843372631854,
      "learning_rate": 0.00010975609756097563,
      "loss": 0.76,
      "step": 45
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.1997879106921521,
      "learning_rate": 0.00012195121951219512,
      "loss": 0.7516,
      "step": 50
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.2251793056206077,
      "learning_rate": 0.00013414634146341464,
      "loss": 0.7173,
      "step": 55
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.21081648300798228,
      "learning_rate": 0.00014634146341463414,
      "loss": 0.7528,
      "step": 60
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.23666214760062876,
      "learning_rate": 0.00015853658536585366,
      "loss": 0.7404,
      "step": 65
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.23722379969745064,
      "learning_rate": 0.0001707317073170732,
      "loss": 0.7612,
      "step": 70
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.24333660134577043,
      "learning_rate": 0.0001829268292682927,
      "loss": 0.717,
      "step": 75
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.24793258937714172,
      "learning_rate": 0.0001951219512195122,
      "loss": 0.7233,
      "step": 80
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.21823934898390882,
      "learning_rate": 0.0001999916658654738,
      "loss": 0.7168,
      "step": 85
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.2468304527466075,
      "learning_rate": 0.0001999407400739705,
      "loss": 0.7519,
      "step": 90
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.2481744524594569,
      "learning_rate": 0.00019984354211555644,
      "loss": 0.7212,
      "step": 95
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.22553056449126135,
      "learning_rate": 0.00019970011699250152,
      "loss": 0.7351,
      "step": 100
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.20832972377638914,
      "learning_rate": 0.00019951053111006976,
      "loss": 0.7611,
      "step": 105
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.2276942421072934,
      "learning_rate": 0.00019927487224577402,
      "loss": 0.7373,
      "step": 110
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.22785202882473857,
      "learning_rate": 0.0001989932495087353,
      "loss": 0.7777,
      "step": 115
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.23403114621707224,
      "learning_rate": 0.0001986657932891657,
      "loss": 0.7814,
      "step": 120
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.21032447129887216,
      "learning_rate": 0.0001982926551979982,
      "loss": 0.7601,
      "step": 125
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.21778287055015214,
      "learning_rate": 0.00019787400799669154,
      "loss": 0.7465,
      "step": 130
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.23068074124934998,
      "learning_rate": 0.00019741004551724207,
      "loss": 0.7671,
      "step": 135
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.21498665804807518,
      "learning_rate": 0.00019690098257244064,
      "loss": 0.7448,
      "step": 140
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.2217460126233852,
      "learning_rate": 0.00019634705485641488,
      "loss": 0.7508,
      "step": 145
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.22491198578150204,
      "learning_rate": 0.00019574851883550395,
      "loss": 0.7528,
      "step": 150
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.21656015924096556,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.7528,
      "step": 155
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.21231516089906458,
      "learning_rate": 0.00019441875088341997,
      "loss": 0.7334,
      "step": 160
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.20043526486232421,
      "learning_rate": 0.00019368813462954316,
      "loss": 0.7092,
      "step": 165
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.2066905562271494,
      "learning_rate": 0.00019291414114031743,
      "loss": 0.69,
      "step": 170
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.2409977578699428,
      "learning_rate": 0.00019209712877166349,
      "loss": 0.7274,
      "step": 175
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.21478894589801012,
      "learning_rate": 0.00019123747579707275,
      "loss": 0.702,
      "step": 180
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.21830847034248066,
      "learning_rate": 0.00019033558023246844,
      "loss": 0.7474,
      "step": 185
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.22565415608082542,
      "learning_rate": 0.0001893918596519257,
      "loss": 0.7263,
      "step": 190
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.22451701734198667,
      "learning_rate": 0.00018840675099433636,
      "loss": 0.7376,
      "step": 195
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.22016338896296778,
      "learning_rate": 0.00018738071036110808,
      "loss": 0.6939,
      "step": 200
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.20607069335042727,
      "learning_rate": 0.00018631421280499116,
      "loss": 0.7351,
      "step": 205
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.20601552052171884,
      "learning_rate": 0.00018520775211013093,
      "loss": 0.7082,
      "step": 210
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.21774646892016394,
      "learning_rate": 0.00018406184056344782,
      "loss": 0.745,
      "step": 215
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.23099746146509867,
      "learning_rate": 0.00018287700871745036,
      "loss": 0.7707,
      "step": 220
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.2225513878139953,
      "learning_rate": 0.0001816538051445916,
      "loss": 0.72,
      "step": 225
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.2810648109589189,
      "learning_rate": 0.00018039279618328212,
      "loss": 0.7159,
      "step": 230
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.23360820743854752,
      "learning_rate": 0.00017909456567567772,
      "loss": 0.7916,
      "step": 235
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.2346981575638156,
      "learning_rate": 0.0001777597146973627,
      "loss": 0.7543,
      "step": 240
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.22472791649678256,
      "learning_rate": 0.00017638886127905427,
      "loss": 0.7145,
      "step": 245
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.2330382456080435,
      "learning_rate": 0.00017498264012045687,
      "loss": 0.6951,
      "step": 250
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.28462945710570964,
      "learning_rate": 0.00017354170229639856,
      "loss": 0.7218,
      "step": 255
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.23120564986808903,
      "learning_rate": 0.00017206671495538612,
      "loss": 0.6954,
      "step": 260
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.21787726424323484,
      "learning_rate": 0.0001705583610107178,
      "loss": 0.6993,
      "step": 265
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.22337741579847503,
      "learning_rate": 0.0001690173388242972,
      "loss": 0.7742,
      "step": 270
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.2418352997312942,
      "learning_rate": 0.00016744436188329456,
      "loss": 0.7385,
      "step": 275
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.25104847122600993,
      "learning_rate": 0.0001658401584698049,
      "loss": 0.7142,
      "step": 280
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.21545698908596508,
      "learning_rate": 0.00016420547132365635,
      "loss": 0.7043,
      "step": 285
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.24534521084917407,
      "learning_rate": 0.00016254105729852464,
      "loss": 0.6988,
      "step": 290
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.24621959592804224,
      "learning_rate": 0.00016084768701151263,
      "loss": 0.701,
      "step": 295
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.2441445089915858,
      "learning_rate": 0.00015912614448635782,
      "loss": 0.713,
      "step": 300
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.23144503509205255,
      "learning_rate": 0.00015737722679043248,
      "loss": 0.7185,
      "step": 305
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.23779196053246493,
      "learning_rate": 0.00015560174366570446,
      "loss": 0.6585,
      "step": 310
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.23922306500283833,
      "learning_rate": 0.00015380051715382996,
      "loss": 0.7025,
      "step": 315
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.25732276520746106,
      "learning_rate": 0.0001519743812155516,
      "loss": 0.6496,
      "step": 320
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.25676931335616193,
      "learning_rate": 0.00015012418134457755,
      "loss": 0.6946,
      "step": 325
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.25925282419918827,
      "learning_rate": 0.00014825077417612186,
      "loss": 0.7309,
      "step": 330
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.25723625109413206,
      "learning_rate": 0.0001463550270902851,
      "loss": 0.6929,
      "step": 335
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.2758091340120669,
      "learning_rate": 0.00014443781781046136,
      "loss": 0.7016,
      "step": 340
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.2368505670464136,
      "learning_rate": 0.0001425000339969554,
      "loss": 0.703,
      "step": 345
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.23084399243795903,
      "learning_rate": 0.00014054257283599973,
      "loss": 0.7102,
      "step": 350
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.2503463601524904,
      "learning_rate": 0.0001385663406243607,
      "loss": 0.7278,
      "step": 355
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.22226083699821775,
      "learning_rate": 0.00013657225234972695,
      "loss": 0.6753,
      "step": 360
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.26300812718557964,
      "learning_rate": 0.00013456123126707334,
      "loss": 0.6871,
      "step": 365
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.257794988184064,
      "learning_rate": 0.00013253420847119803,
      "loss": 0.737,
      "step": 370
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.23283253484592037,
      "learning_rate": 0.0001304921224656289,
      "loss": 0.6948,
      "step": 375
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.2489222484332262,
      "learning_rate": 0.0001284359187281004,
      "loss": 0.6807,
      "step": 380
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.24773496287366326,
      "learning_rate": 0.00012636654927280073,
      "loss": 0.7322,
      "step": 385
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.25284782064647693,
      "learning_rate": 0.0001242849722095936,
      "loss": 0.7038,
      "step": 390
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.2640705980904302,
      "learning_rate": 0.00012219215130041656,
      "loss": 0.6818,
      "step": 395
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.244353569188838,
      "learning_rate": 0.00012008905551306356,
      "loss": 0.6888,
      "step": 400
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.2582385397817055,
      "learning_rate": 0.00011797665857255621,
      "loss": 0.7347,
      "step": 405
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.2657939654189472,
      "learning_rate": 0.00011585593851031347,
      "loss": 0.7292,
      "step": 410
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.2412402754418769,
      "learning_rate": 0.00011372787721132648,
      "loss": 0.6793,
      "step": 415
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.25269309847633625,
      "learning_rate": 0.00011159345995955006,
      "loss": 0.6811,
      "step": 420
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.3555199335436584,
      "learning_rate": 0.00010945367498171993,
      "loss": 0.6632,
      "step": 425
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.2807299993278487,
      "learning_rate": 0.00010730951298980776,
      "loss": 0.7121,
      "step": 430
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.2676168527706757,
      "learning_rate": 0.00010516196672232539,
      "loss": 0.6897,
      "step": 435
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.2696033985636684,
      "learning_rate": 0.00010301203048469083,
      "loss": 0.7149,
      "step": 440
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.264343040293582,
      "learning_rate": 0.00010086069968886885,
      "loss": 0.697,
      "step": 445
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.24941452883154777,
      "learning_rate": 9.870897039249911e-05,
      "loss": 0.6739,
      "step": 450
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.27184754304212144,
      "learning_rate": 9.655783883772545e-05,
      "loss": 0.6771,
      "step": 455
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.25069232194770324,
      "learning_rate": 9.440830098993969e-05,
      "loss": 0.6628,
      "step": 460
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.26945962491120906,
      "learning_rate": 9.22613520766537e-05,
      "loss": 0.6501,
      "step": 465
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.27595385280343615,
      "learning_rate": 9.011798612671286e-05,
      "loss": 0.6923,
      "step": 470
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.23254427170051437,
      "learning_rate": 8.797919551006475e-05,
      "loss": 0.6687,
      "step": 475
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.2700843553748168,
      "learning_rate": 8.58459704782957e-05,
      "loss": 0.6922,
      "step": 480
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.29075126290562925,
      "learning_rate": 8.371929870614833e-05,
      "loss": 0.6838,
      "step": 485
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.2582533489019304,
      "learning_rate": 8.160016483423199e-05,
      "loss": 0.709,
      "step": 490
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.24306629955384693,
      "learning_rate": 7.948955001313811e-05,
      "loss": 0.6644,
      "step": 495
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.2792860639380007,
      "learning_rate": 7.738843144917119e-05,
      "loss": 0.7122,
      "step": 500
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.2717162268363558,
      "learning_rate": 7.529778195190645e-05,
      "loss": 0.7235,
      "step": 505
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.284678632001059,
      "learning_rate": 7.321856948378259e-05,
      "loss": 0.6916,
      "step": 510
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.26262649770739277,
      "learning_rate": 7.115175671193913e-05,
      "loss": 0.6741,
      "step": 515
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.2529748057506211,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.7025,
      "step": 520
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.28817490304932813,
      "learning_rate": 6.7059151777547e-05,
      "loss": 0.7071,
      "step": 525
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.28586738996038136,
      "learning_rate": 6.503525447487715e-05,
      "loss": 0.7223,
      "step": 530
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.27355383837426006,
      "learning_rate": 6.30275457109327e-05,
      "loss": 0.6878,
      "step": 535
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.2696146366032459,
      "learning_rate": 6.103695504692122e-05,
      "loss": 0.666,
      "step": 540
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.293084107951244,
      "learning_rate": 5.906440411843787e-05,
      "loss": 0.7165,
      "step": 545
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.2744477897912315,
      "learning_rate": 5.7110806208751655e-05,
      "loss": 0.6791,
      "step": 550
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.2719074571288193,
      "learning_rate": 5.5177065825958966e-05,
      "loss": 0.7183,
      "step": 555
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.29658031172179306,
      "learning_rate": 5.326407828419979e-05,
      "loss": 0.6741,
      "step": 560
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.25304272831172997,
      "learning_rate": 5.137272928913097e-05,
      "loss": 0.68,
      "step": 565
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.2676461633502972,
      "learning_rate": 4.9503894527847964e-05,
      "loss": 0.6593,
      "step": 570
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.31247663625681804,
      "learning_rate": 4.765843926344501e-05,
      "loss": 0.6863,
      "step": 575
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.2955325664002294,
      "learning_rate": 4.583721793440188e-05,
      "loss": 0.6728,
      "step": 580
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.2807174542636257,
      "learning_rate": 4.4041073758982335e-05,
      "loss": 0.6913,
      "step": 585
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.2989395763460932,
      "learning_rate": 4.227083834482728e-05,
      "loss": 0.6085,
      "step": 590
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.2636409419874345,
      "learning_rate": 4.052733130392367e-05,
      "loss": 0.676,
      "step": 595
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.2735419958012749,
      "learning_rate": 3.881135987312757e-05,
      "loss": 0.697,
      "step": 600
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.29812585759610105,
      "learning_rate": 3.712371854041654e-05,
      "loss": 0.6788,
      "step": 605
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.2858805204008935,
      "learning_rate": 3.546518867704499e-05,
      "loss": 0.6891,
      "step": 610
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.2711930626215174,
      "learning_rate": 3.383653817577216e-05,
      "loss": 0.6626,
      "step": 615
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.30583390146359735,
      "learning_rate": 3.223852109533112e-05,
      "loss": 0.6977,
      "step": 620
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.31918222012772746,
      "learning_rate": 3.0671877311302244e-05,
      "loss": 0.6546,
      "step": 625
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.27113125300550006,
      "learning_rate": 2.9137332173554043e-05,
      "loss": 0.6718,
      "step": 630
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.2933116316225334,
      "learning_rate": 2.763559617040876e-05,
      "loss": 0.677,
      "step": 635
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.3236593994578141,
      "learning_rate": 2.616736459968936e-05,
      "loss": 0.6685,
      "step": 640
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.30406592976690633,
      "learning_rate": 2.473331724679917e-05,
      "loss": 0.6746,
      "step": 645
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.27899362724771787,
      "learning_rate": 2.33341180699841e-05,
      "loss": 0.6667,
      "step": 650
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.2954598561484786,
      "learning_rate": 2.1970414892922442e-05,
      "loss": 0.7175,
      "step": 655
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.3100581301740447,
      "learning_rate": 2.0642839104785272e-05,
      "loss": 0.6979,
      "step": 660
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.2759099798186259,
      "learning_rate": 1.9352005367905536e-05,
      "loss": 0.7098,
      "step": 665
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.30931195338097267,
      "learning_rate": 1.8098511333192024e-05,
      "loss": 0.6593,
      "step": 670
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.32368942926694433,
      "learning_rate": 1.6882937363419203e-05,
      "loss": 0.6869,
      "step": 675
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.2776307386865438,
      "learning_rate": 1.570584626452173e-05,
      "loss": 0.6717,
      "step": 680
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.30301564173702233,
      "learning_rate": 1.4567783025017301e-05,
      "loss": 0.6681,
      "step": 685
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.31904417802084467,
      "learning_rate": 1.3469274563679402e-05,
      "loss": 0.6851,
      "step": 690
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.2774036633854793,
      "learning_rate": 1.2410829485575704e-05,
      "loss": 0.7023,
      "step": 695
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.2965220463492153,
      "learning_rate": 1.1392937846586215e-05,
      "loss": 0.6565,
      "step": 700
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.32675908474718157,
      "learning_rate": 1.0416070926509113e-05,
      "loss": 0.6487,
      "step": 705
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.3090900072007822,
      "learning_rate": 9.48068101086026e-06,
      "loss": 0.6976,
      "step": 710
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.309037705156712,
      "learning_rate": 8.58720118146662e-06,
      "loss": 0.66,
      "step": 715
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.3087738006371124,
      "learning_rate": 7.736045115951251e-06,
      "loss": 0.7067,
      "step": 720
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.3443047214751086,
      "learning_rate": 6.927606896202066e-06,
      "loss": 0.6968,
      "step": 725
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.30866169616965344,
      "learning_rate": 6.16226082591359e-06,
      "loss": 0.6595,
      "step": 730
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.26453835920017715,
      "learning_rate": 5.440361257285742e-06,
      "loss": 0.6873,
      "step": 735
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.2870240594644306,
      "learning_rate": 4.762242426960262e-06,
      "loss": 0.6897,
      "step": 740
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.31907803929807727,
      "learning_rate": 4.128218301270359e-06,
      "loss": 0.7062,
      "step": 745
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.3461230801807586,
      "learning_rate": 3.5385824308756587e-06,
      "loss": 0.6506,
      "step": 750
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.2982663104307397,
      "learning_rate": 2.9936078148492973e-06,
      "loss": 0.6722,
      "step": 755
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.27990503389995286,
      "learning_rate": 2.493546774280531e-06,
      "loss": 0.7049,
      "step": 760
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.3112912132178525,
      "learning_rate": 2.0386308354509942e-06,
      "loss": 0.7125,
      "step": 765
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.31758074243604545,
      "learning_rate": 1.6290706226390285e-06,
      "loss": 0.6857,
      "step": 770
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.3210813944651733,
      "learning_rate": 1.2650557606013635e-06,
      "loss": 0.6829,
      "step": 775
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.2880839335185284,
      "learning_rate": 9.46754786777726e-07,
      "loss": 0.7146,
      "step": 780
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.29564960884643837,
      "learning_rate": 6.74315073258569e-07,
      "loss": 0.7101,
      "step": 785
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.29256820612374973,
      "learning_rate": 4.4786275855247527e-07,
      "loss": 0.6478,
      "step": 790
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.33071981852297627,
      "learning_rate": 2.675026891844512e-07,
      "loss": 0.672,
      "step": 795
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.329286091376579,
      "learning_rate": 1.333183711524133e-07,
      "loss": 0.6438,
      "step": 800
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.3104373424505981,
      "learning_rate": 4.5371931264270864e-08,
      "loss": 0.6555,
      "step": 805
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2971195933385636,
      "learning_rate": 3.7040883734462683e-09,
      "loss": 0.6774,
      "step": 810
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.926139771938324,
      "eval_runtime": 459.5724,
      "eval_samples_per_second": 5.026,
      "eval_steps_per_second": 0.316,
      "step": 812
    },
    {
      "epoch": 1.0,
      "step": 812,
      "total_flos": 9607955794100224.0,
      "train_loss": 0.7061199183240899,
      "train_runtime": 29649.4111,
      "train_samples_per_second": 1.753,
      "train_steps_per_second": 0.027
    }
  ],
  "logging_steps": 5,
  "max_steps": 812,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 9607955794100224.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}