{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 3898,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "grad_norm": 0.18273725037333932, "learning_rate": 5.128205128205128e-07, "loss": 0.9609, "step": 1 },
    { "epoch": 0.0, "grad_norm": 0.4188407238663635, "learning_rate": 2.564102564102564e-06, "loss": 0.8848, "step": 5 },
    { "epoch": 0.0, "grad_norm": 0.19777627866810965, "learning_rate": 5.128205128205128e-06, "loss": 0.7625, "step": 10 },
    { "epoch": 0.0, "grad_norm": 0.22192343921645707, "learning_rate": 7.692307692307694e-06, "loss": 0.7876, "step": 15 },
    { "epoch": 0.01, "grad_norm": 0.18841857488548644, "learning_rate": 1.0256410256410256e-05, "loss": 0.8592, "step": 20 },
    { "epoch": 0.01, "grad_norm": 0.33681419457634554, "learning_rate": 1.282051282051282e-05, "loss": 0.8162, "step": 25 },
    { "epoch": 0.01, "grad_norm": 0.33600938003479874, "learning_rate": 1.5384615384615387e-05, "loss": 0.8894, "step": 30 },
    { "epoch": 0.01, "grad_norm": 0.3176697356314787, "learning_rate": 1.794871794871795e-05, "loss": 0.7155, "step": 35 },
    { "epoch": 0.01, "grad_norm": 0.21204726045838998, "learning_rate": 2.0512820512820512e-05, "loss": 0.7677, "step": 40 },
    { "epoch": 0.01, "grad_norm": 0.19741016497576444, "learning_rate": 2.307692307692308e-05, "loss": 0.767, "step": 45 },
    { "epoch": 0.01, "grad_norm": 0.2867616395864828, "learning_rate": 2.564102564102564e-05, "loss": 0.7459, "step": 50 },
    { "epoch": 0.01, "grad_norm": 0.277593018616144, "learning_rate": 2.8205128205128207e-05, "loss": 0.716, "step": 55 },
    { "epoch": 0.02, "grad_norm": 0.20494371343645457, "learning_rate": 3.0769230769230774e-05, "loss": 0.7252, "step": 60 },
    { "epoch": 0.02, "grad_norm": 0.2514927327142473, "learning_rate": 3.3333333333333335e-05, "loss": 0.7427, "step": 65 },
    { "epoch": 0.02, "grad_norm": 0.3569958514232062, "learning_rate": 3.58974358974359e-05, "loss": 0.766, "step": 70 },
    { "epoch": 0.02, "grad_norm": 0.24879302023855776, "learning_rate": 3.846153846153846e-05, "loss": 0.7423, "step": 75 },
    { "epoch": 0.02, "grad_norm": 0.25452303418558414, "learning_rate": 4.1025641025641023e-05, "loss": 0.6759, "step": 80 },
    { "epoch": 0.02, "grad_norm": 0.2839971702285102, "learning_rate": 4.358974358974359e-05, "loss": 0.7479, "step": 85 },
    { "epoch": 0.02, "grad_norm": 0.30384992603439903, "learning_rate": 4.615384615384616e-05, "loss": 0.6552, "step": 90 },
    { "epoch": 0.02, "grad_norm": 0.3852050595096914, "learning_rate": 4.871794871794872e-05, "loss": 0.6522, "step": 95 },
    { "epoch": 0.03, "grad_norm": 0.22298687548607224, "learning_rate": 5.128205128205128e-05, "loss": 0.7471, "step": 100 },
    { "epoch": 0.03, "grad_norm": 0.2048934487949906, "learning_rate": 5.384615384615385e-05, "loss": 0.7084, "step": 105 },
    { "epoch": 0.03, "grad_norm": 0.2069689289264241, "learning_rate": 5.6410256410256414e-05, "loss": 0.6885, "step": 110 },
    { "epoch": 0.03, "grad_norm": 0.24817118260446214, "learning_rate": 5.897435897435898e-05, "loss": 0.7736, "step": 115 },
    { "epoch": 0.03, "grad_norm": 0.40895048260028827, "learning_rate": 6.153846153846155e-05, "loss": 0.8098, "step": 120 },
    { "epoch": 0.03, "grad_norm": 0.35594857119791506, "learning_rate": 6.410256410256412e-05, "loss": 0.7543, "step": 125 },
    { "epoch": 0.03, "grad_norm": 0.21471650478613163, "learning_rate": 6.666666666666667e-05, "loss": 0.6914, "step": 130 },
    { "epoch": 0.03, "grad_norm": 0.3610348756628403, "learning_rate": 6.923076923076924e-05, "loss": 0.7253, "step": 135 },
    { "epoch": 0.04, "grad_norm": 0.26836543734793383, "learning_rate": 7.17948717948718e-05, "loss": 0.6539, "step": 140 },
    { "epoch": 0.04, "grad_norm": 0.2835751411318648, "learning_rate": 7.435897435897436e-05, "loss": 0.8097, "step": 145 },
    { "epoch": 0.04, "grad_norm": 0.20105648692084657, "learning_rate": 7.692307692307693e-05, "loss": 0.7385, "step": 150 },
    { "epoch": 0.04, "grad_norm": 0.16414727315119573, "learning_rate": 7.948717948717948e-05, "loss": 0.6527, "step": 155 },
    { "epoch": 0.04, "grad_norm": 0.24741062361895544, "learning_rate": 8.205128205128205e-05, "loss": 0.6857, "step": 160 },
    { "epoch": 0.04, "grad_norm": 0.20873983107979052, "learning_rate": 8.461538461538461e-05, "loss": 0.7498, "step": 165 },
    { "epoch": 0.04, "grad_norm": 0.22699678420428857, "learning_rate": 8.717948717948718e-05, "loss": 0.7112, "step": 170 },
    { "epoch": 0.04, "grad_norm": 0.19007788693963085, "learning_rate": 8.974358974358975e-05, "loss": 0.751, "step": 175 },
    { "epoch": 0.05, "grad_norm": 0.18866388272365925, "learning_rate": 9.230769230769232e-05, "loss": 0.6833, "step": 180 },
    { "epoch": 0.05, "grad_norm": 0.19818865870224087, "learning_rate": 9.487179487179487e-05, "loss": 0.6851, "step": 185 },
    { "epoch": 0.05, "grad_norm": 0.23302753556690228, "learning_rate": 9.743589743589744e-05, "loss": 0.7745, "step": 190 },
    { "epoch": 0.05, "grad_norm": 0.18540393542694006, "learning_rate": 0.0001, "loss": 0.7403, "step": 195 },
    { "epoch": 0.05, "grad_norm": 0.1839673690602772, "learning_rate": 0.00010256410256410256, "loss": 0.8231, "step": 200 },
    { "epoch": 0.05, "grad_norm": 0.22798420463327215, "learning_rate": 0.00010512820512820514, "loss": 0.6674, "step": 205 },
    { "epoch": 0.05, "grad_norm": 0.2464229066359474, "learning_rate": 0.0001076923076923077, "loss": 0.7846, "step": 210 },
    { "epoch": 0.06, "grad_norm": 0.19433765535813324, "learning_rate": 0.00011025641025641027, "loss": 0.7632, "step": 215 },
    { "epoch": 0.06, "grad_norm": 0.21304653327041365, "learning_rate": 0.00011282051282051283, "loss": 0.7541, "step": 220 },
    { "epoch": 0.06, "grad_norm": 0.1770514082218146, "learning_rate": 0.00011538461538461538, "loss": 0.6967, "step": 225 },
    { "epoch": 0.06, "grad_norm": 0.22467577773491165, "learning_rate": 0.00011794871794871796, "loss": 0.7131, "step": 230 },
    { "epoch": 0.06, "grad_norm": 0.1765044110348834, "learning_rate": 0.00012051282051282052, "loss": 0.7442, "step": 235 },
    { "epoch": 0.06, "grad_norm": 0.29795271013699604, "learning_rate": 0.0001230769230769231, "loss": 0.8024, "step": 240 },
    { "epoch": 0.06, "grad_norm": 0.14389624633185258, "learning_rate": 0.00012564102564102564, "loss": 0.7101, "step": 245 },
    { "epoch": 0.06, "grad_norm": 0.21079245502502064, "learning_rate": 0.00012820512820512823, "loss": 0.709, "step": 250 },
    { "epoch": 0.07, "grad_norm": 0.17829547036873233, "learning_rate": 0.00013076923076923077, "loss": 0.7443, "step": 255 },
    { "epoch": 0.07, "grad_norm": 0.2173226668602675, "learning_rate": 0.00013333333333333334, "loss": 0.6832, "step": 260 },
    { "epoch": 0.07, "grad_norm": 0.26876676153975865, "learning_rate": 0.0001358974358974359, "loss": 0.7284, "step": 265 },
    { "epoch": 0.07, "grad_norm": 0.15830628083598494, "learning_rate": 0.00013846153846153847, "loss": 0.7194, "step": 270 },
    { "epoch": 0.07, "grad_norm": 0.19342822085523578, "learning_rate": 0.00014102564102564104, "loss": 0.7217, "step": 275 },
    { "epoch": 0.07, "grad_norm": 0.18871672612378254, "learning_rate": 0.0001435897435897436, "loss": 0.6593, "step": 280 },
    { "epoch": 0.07, "grad_norm": 0.21272446268662773, "learning_rate": 0.00014615384615384615, "loss": 0.6667, "step": 285 },
    { "epoch": 0.07, "grad_norm": 0.2155663432457548, "learning_rate": 0.00014871794871794872, "loss": 0.6597, "step": 290 },
    { "epoch": 0.08, "grad_norm": 0.44468877213339963, "learning_rate": 0.00015128205128205128, "loss": 0.7204, "step": 295 },
    { "epoch": 0.08, "grad_norm": 0.2164584239122828, "learning_rate": 0.00015384615384615385, "loss": 0.8244, "step": 300 },
    { "epoch": 0.08, "grad_norm": 0.1938136691775617, "learning_rate": 0.00015641025641025642, "loss": 0.6991, "step": 305 },
    { "epoch": 0.08, "grad_norm": 0.1823678116608096, "learning_rate": 0.00015897435897435896, "loss": 0.6541, "step": 310 },
    { "epoch": 0.08, "grad_norm": 0.1629985226358383, "learning_rate": 0.00016153846153846155, "loss": 0.7331, "step": 315 },
    { "epoch": 0.08, "grad_norm": 0.19599880954946838, "learning_rate": 0.0001641025641025641, "loss": 0.6446, "step": 320 },
    { "epoch": 0.08, "grad_norm": 0.23282506824657653, "learning_rate": 0.0001666666666666667, "loss": 0.6723, "step": 325 },
    { "epoch": 0.08, "grad_norm": 0.20570313944345442, "learning_rate": 0.00016923076923076923, "loss": 0.7924, "step": 330 },
    { "epoch": 0.09, "grad_norm": 0.18124135373904873, "learning_rate": 0.0001717948717948718, "loss": 0.8191, "step": 335 },
    { "epoch": 0.09, "grad_norm": 0.18929176365178993, "learning_rate": 0.00017435897435897436, "loss": 0.7289, "step": 340 },
    { "epoch": 0.09, "grad_norm": 0.19977748525194552, "learning_rate": 0.00017692307692307693, "loss": 0.7362, "step": 345 },
    { "epoch": 0.09, "grad_norm": 0.1990441990377106, "learning_rate": 0.0001794871794871795, "loss": 0.853, "step": 350 },
    { "epoch": 0.09, "grad_norm": 0.3087674819281357, "learning_rate": 0.00018205128205128207, "loss": 0.6986, "step": 355 },
    { "epoch": 0.09, "grad_norm": 0.25912532033253527, "learning_rate": 0.00018461538461538463, "loss": 0.6727, "step": 360 },
    { "epoch": 0.09, "grad_norm": 0.30170168244659684, "learning_rate": 0.0001871794871794872, "loss": 0.7641, "step": 365 },
    { "epoch": 0.09, "grad_norm": 0.23159332627603768, "learning_rate": 0.00018974358974358974, "loss": 0.7467, "step": 370 },
    { "epoch": 0.1, "grad_norm": 0.21437363907196583, "learning_rate": 0.00019230769230769233, "loss": 0.689, "step": 375 },
    { "epoch": 0.1, "grad_norm": 0.18673889188699844, "learning_rate": 0.00019487179487179487, "loss": 0.625, "step": 380 },
    { "epoch": 0.1, "grad_norm": 0.24453425122203856, "learning_rate": 0.00019743589743589744, "loss": 0.6991, "step": 385 },
    { "epoch": 0.1, "grad_norm": 0.19852275666733094, "learning_rate": 0.0002, "loss": 0.8059, "step": 390 },
    { "epoch": 0.1, "grad_norm": 0.183724259914749, "learning_rate": 0.00019999899748734544, "loss": 0.7681, "step": 395 },
    { "epoch": 0.1, "grad_norm": 0.2127551390821211, "learning_rate": 0.00019999598996948235, "loss": 0.6547, "step": 400 },
    { "epoch": 0.1, "grad_norm": 0.2684683587075289, "learning_rate": 0.00019999097750671223, "loss": 0.7287, "step": 405 },
    { "epoch": 0.11, "grad_norm": 0.2283900397579642, "learning_rate": 0.00019998396019953624, "loss": 0.6885, "step": 410 },
    { "epoch": 0.11, "grad_norm": 0.24702860686198994, "learning_rate": 0.00019997493818865318, "loss": 0.7212, "step": 415 },
    { "epoch": 0.11, "grad_norm": 0.241475192547802, "learning_rate": 0.0001999639116549566, "loss": 0.5949, "step": 420 },
    { "epoch": 0.11, "grad_norm": 0.23533430796243504, "learning_rate": 0.00019995088081953136, "loss": 0.7203, "step": 425 },
    { "epoch": 0.11, "grad_norm": 0.24956850711444106, "learning_rate": 0.00019993584594364894, "loss": 0.6447, "step": 430 },
    { "epoch": 0.11, "grad_norm": 0.20131118173728133, "learning_rate": 0.00019991880732876246, "loss": 0.699, "step": 435 },
    { "epoch": 0.11, "grad_norm": 0.2067287885265624, "learning_rate": 0.0001998997653165004, "loss": 0.74, "step": 440 },
    { "epoch": 0.11, "grad_norm": 0.20357844001623, "learning_rate": 0.00019987872028866003, "loss": 0.8027, "step": 445 },
    { "epoch": 0.12, "grad_norm": 0.23984712872623487, "learning_rate": 0.00019985567266719934, "loss": 0.7839, "step": 450 },
    { "epoch": 0.12, "grad_norm": 0.1897862238822482, "learning_rate": 0.00019983062291422908, "loss": 0.7433, "step": 455 },
    { "epoch": 0.12, "grad_norm": 0.24095134724957823, "learning_rate": 0.00019980357153200315, "loss": 0.7096, "step": 460 },
    { "epoch": 0.12, "grad_norm": 0.2852750359187981, "learning_rate": 0.00019977451906290854, "loss": 0.7163, "step": 465 },
    { "epoch": 0.12, "grad_norm": 0.20980919665053135, "learning_rate": 0.00019974346608945466, "loss": 0.7439, "step": 470 },
    { "epoch": 0.12, "grad_norm": 0.2295278218977822, "learning_rate": 0.0001997104132342614, "loss": 0.6644, "step": 475 },
    { "epoch": 0.12, "grad_norm": 0.19640395085838386, "learning_rate": 0.00019967536116004698, "loss": 0.8113, "step": 480 },
    { "epoch": 0.12, "grad_norm": 0.23742262528964145, "learning_rate": 0.00019963831056961433, "loss": 0.6803, "step": 485 },
    { "epoch": 0.13, "grad_norm": 0.2209130794031639, "learning_rate": 0.00019959926220583713, "loss": 0.7189, "step": 490 },
    { "epoch": 0.13, "grad_norm": 0.4158388376129303, "learning_rate": 0.000199558216851645, "loss": 0.7337, "step": 495 },
    { "epoch": 0.13, "grad_norm": 0.21568480709969076, "learning_rate": 0.00019951517533000764, "loss": 0.6068, "step": 500 },
    { "epoch": 0.13, "grad_norm": 0.23552741626776288, "learning_rate": 0.00019947013850391847, "loss": 0.7098, "step": 505 },
    { "epoch": 0.13, "grad_norm": 0.2277845662969724, "learning_rate": 0.00019942310727637724, "loss": 0.7695, "step": 510 },
    { "epoch": 0.13, "grad_norm": 0.24111051524751667, "learning_rate": 0.000199374082590372, "loss": 0.7586, "step": 515 },
    { "epoch": 0.13, "grad_norm": 0.19134678351380507, "learning_rate": 0.00019932306542886009, "loss": 0.7393, "step": 520 },
    { "epoch": 0.13, "grad_norm": 0.23589856188985894, "learning_rate": 0.0001992700568147485, "loss": 0.7205, "step": 525 },
    { "epoch": 0.14, "grad_norm": 0.252896098878244, "learning_rate": 0.00019921505781087334, "loss": 0.7336, "step": 530 },
    { "epoch": 0.14, "grad_norm": 0.25248409296825886, "learning_rate": 0.00019915806951997862, "loss": 0.7949, "step": 535 },
    { "epoch": 0.14, "grad_norm": 0.20137551343484256, "learning_rate": 0.00019909909308469398, "loss": 0.7558, "step": 540 },
    { "epoch": 0.14, "grad_norm": 0.2111631505276513, "learning_rate": 0.0001990381296875118, "loss": 0.6837, "step": 545 },
    { "epoch": 0.14, "grad_norm": 0.29184203037550105, "learning_rate": 0.0001989751805507637, "loss": 0.8117, "step": 550 },
    { "epoch": 0.14, "grad_norm": 0.2206173102779168, "learning_rate": 0.0001989102469365958, "loss": 0.8164, "step": 555 },
    { "epoch": 0.14, "grad_norm": 0.21507931251783377, "learning_rate": 0.00019884333014694345, "loss": 0.7133, "step": 560 },
    { "epoch": 0.14, "grad_norm": 0.22239957224386717, "learning_rate": 0.00019877443152350527, "loss": 0.7375, "step": 565 },
    { "epoch": 0.15, "grad_norm": 0.21591708491473222, "learning_rate": 0.00019870355244771607, "loss": 0.6708, "step": 570 },
    { "epoch": 0.15, "grad_norm": 0.25918641174746915, "learning_rate": 0.0001986306943407193, "loss": 0.7389, "step": 575 },
    { "epoch": 0.15, "grad_norm": 0.22262329838643527, "learning_rate": 0.00019855585866333835, "loss": 0.7479, "step": 580 },
    { "epoch": 0.15, "grad_norm": 0.2449638774409369, "learning_rate": 0.00019847904691604757, "loss": 0.7486, "step": 585 },
    { "epoch": 0.15, "grad_norm": 0.2485277159299432, "learning_rate": 0.00019840026063894193, "loss": 0.6979, "step": 590 },
    { "epoch": 0.15, "grad_norm": 0.28798789472500747, "learning_rate": 0.0001983195014117062, "loss": 0.7392, "step": 595 },
    { "epoch": 0.15, "grad_norm": 0.2560345551346601, "learning_rate": 0.00019823677085358335, "loss": 0.6557, "step": 600 },
    { "epoch": 0.16, "grad_norm": 0.2503029916293632, "learning_rate": 0.00019815207062334197, "loss": 0.684, "step": 605 },
    { "epoch": 0.16, "grad_norm": 0.22343460564706522, "learning_rate": 0.00019806540241924317, "loss": 0.7469, "step": 610 },
    { "epoch": 0.16, "grad_norm": 0.23478030369638528, "learning_rate": 0.00019797676797900633, "loss": 0.7651, "step": 615 },
    { "epoch": 0.16, "grad_norm": 0.23393987623099682, "learning_rate": 0.00019788616907977441, "loss": 0.7356, "step": 620 },
    { "epoch": 0.16, "grad_norm": 0.1835277686003351, "learning_rate": 0.0001977936075380783, "loss": 0.6319, "step": 625 },
    { "epoch": 0.16, "grad_norm": 0.22084991905168924, "learning_rate": 0.00019769908520980034, "loss": 0.7451, "step": 630 },
    { "epoch": 0.16, "grad_norm": 0.2532435897912756, "learning_rate": 0.00019760260399013708, "loss": 0.7217, "step": 635 },
    { "epoch": 0.16, "grad_norm": 0.23471511654283184, "learning_rate": 0.00019750416581356146, "loss": 0.7114, "step": 640 },
    { "epoch": 0.17, "grad_norm": 0.21710442836493216, "learning_rate": 0.0001974037726537838, "loss": 0.628, "step": 645 },
    { "epoch": 0.17, "grad_norm": 0.267924875976266, "learning_rate": 0.00019730142652371236, "loss": 0.6469, "step": 650 },
    { "epoch": 0.17, "grad_norm": 0.268474985537624, "learning_rate": 0.00019719712947541295, "loss": 0.6829, "step": 655 },
    { "epoch": 0.17, "grad_norm": 0.16700886295844172, "learning_rate": 0.0001970908836000678, "loss": 0.6877, "step": 660 },
    { "epoch": 0.17, "grad_norm": 0.22055090509747258, "learning_rate": 0.00019698269102793358, "loss": 0.6525, "step": 665 },
    { "epoch": 0.17, "grad_norm": 0.27128839715879177, "learning_rate": 0.00019687255392829877, "loss": 0.6582, "step": 670 },
    { "epoch": 0.17, "grad_norm": 0.21159901567551387, "learning_rate": 0.00019676047450944008, "loss": 0.5732, "step": 675 },
    { "epoch": 0.17, "grad_norm": 0.24374137032809187, "learning_rate": 0.0001966464550185782, "loss": 0.7634, "step": 680 },
    { "epoch": 0.18, "grad_norm": 0.24672679998063676, "learning_rate": 0.00019653049774183282, "loss": 0.5845, "step": 685 },
    { "epoch": 0.18, "grad_norm": 0.2482930627863346, "learning_rate": 0.0001964126050041767, "loss": 0.7651, "step": 690 },
    { "epoch": 0.18, "grad_norm": 0.2553369576381814, "learning_rate": 0.000196292779169389, "loss": 0.6712, "step": 695 },
    { "epoch": 0.18, "grad_norm": 0.30911752230079176, "learning_rate": 0.0001961710226400081, "loss": 0.7166, "step": 700 },
    { "epoch": 0.18, "grad_norm": 0.31535946743812887, "learning_rate": 0.00019604733785728317, "loss": 0.7117, "step": 705 },
    { "epoch": 0.18, "grad_norm": 0.28460415510467557, "learning_rate": 0.00019592172730112544, "loss": 0.709, "step": 710 },
    { "epoch": 0.18, "grad_norm": 0.26288174322385877, "learning_rate": 0.00019579419349005837, "loss": 0.7949, "step": 715 },
    { "epoch": 0.18, "grad_norm": 0.24918287719559648, "learning_rate": 0.00019566473898116713, "loss": 0.7045, "step": 720 },
    { "epoch": 0.19, "grad_norm": 0.26861534205040266, "learning_rate": 0.00019553336637004735, "loss": 0.7401, "step": 725 },
    { "epoch": 0.19, "grad_norm": 0.20350229137993933, "learning_rate": 0.0001954000782907532, "loss": 0.7521, "step": 730 },
    { "epoch": 0.19, "grad_norm": 0.253527308804326, "learning_rate": 0.00019526487741574437, "loss": 0.7833, "step": 735 },
    { "epoch": 0.19, "grad_norm": 0.27106014622439245, "learning_rate": 0.00019512776645583263, "loss": 0.6898, "step": 740 },
    { "epoch": 0.19, "grad_norm": 0.24534894717688374, "learning_rate": 0.0001949887481601274, "loss": 0.8102, "step": 745 },
    { "epoch": 0.19, "grad_norm": 0.37673623038318094, "learning_rate": 0.00019484782531598073, "loss": 0.6929, "step": 750 },
    { "epoch": 0.19, "grad_norm": 0.28044355813302363, "learning_rate": 0.0001947050007489313, "loss": 0.8396, "step": 755 },
    { "epoch": 0.19, "grad_norm": 0.25883167182505523, "learning_rate": 0.00019456027732264784, "loss": 0.7169, "step": 760 },
    { "epoch": 0.2, "grad_norm": 0.33212091144862455, "learning_rate": 0.00019441365793887162, "loss": 0.7562, "step": 765 },
    { "epoch": 0.2, "grad_norm": 0.2265256995053789, "learning_rate": 0.00019426514553735848, "loss": 0.733, "step": 770 },
    { "epoch": 0.2, "grad_norm": 0.1952769250523588, "learning_rate": 0.00019411474309581958, "loss": 0.6875, "step": 775 },
    { "epoch": 0.2, "grad_norm": 0.23715930446049704, "learning_rate": 0.00019396245362986197, "loss": 0.7183, "step": 780 },
    { "epoch": 0.2, "grad_norm": 0.26737163458762153, "learning_rate": 0.00019380828019292798, "loss": 0.6693, "step": 785 },
    { "epoch": 0.2, "grad_norm": 0.3093664057575012, "learning_rate": 0.00019365222587623405, "loss": 0.6971, "step": 790 },
    { "epoch": 0.2, "grad_norm": 0.22025157297022468, "learning_rate": 0.00019349429380870873, "loss": 0.7437, "step": 795 },
    { "epoch": 0.21, "grad_norm": 0.23171097491730766, "learning_rate": 0.00019333448715692995, "loss": 0.7528, "step": 800 },
    { "epoch": 0.21, "grad_norm": 0.23951739750544596, "learning_rate": 0.0001931728091250615, "loss": 0.7849, "step": 805 },
    { "epoch": 0.21, "grad_norm": 0.24639640582380729, "learning_rate": 0.00019300926295478884, "loss": 0.7688, "step": 810 },
    { "epoch": 0.21, "grad_norm": 0.24023205632785313, "learning_rate": 0.00019284385192525405, "loss": 0.6915, "step": 815 },
    { "epoch": 0.21, "grad_norm": 0.2548147235517417, "learning_rate": 0.0001926765793529902, "loss": 0.7137, "step": 820 },
    { "epoch": 0.21, "grad_norm": 0.24890950536791295, "learning_rate": 0.00019250744859185468, "loss": 0.7904, "step": 825 },
    { "epoch": 0.21, "grad_norm": 0.31444435610399357, "learning_rate": 0.00019233646303296205, "loss": 0.708, "step": 830 },
    { "epoch": 0.21, "grad_norm": 0.25866961126213517, "learning_rate": 0.000192163626104616, "loss": 0.6808, "step": 835 },
    { "epoch": 0.22, "grad_norm": 0.2605403676294042, "learning_rate": 0.00019198894127224074, "loss": 0.7549, "step": 840 },
    { "epoch": 0.22, "grad_norm": 0.36813656529214944, "learning_rate": 0.00019181241203831137, "loss": 0.7715, "step": 845 },
    { "epoch": 0.22, "grad_norm": 0.30866872454744043, "learning_rate": 0.0001916340419422837, "loss": 0.7488, "step": 850 },
    { "epoch": 0.22, "grad_norm": 0.26715773589614006, "learning_rate": 0.00019145383456052327, "loss": 0.6933, "step": 855 },
    { "epoch": 0.22, "grad_norm": 0.2405309690369464, "learning_rate": 0.00019127179350623372, "loss": 0.6964, "step": 860 },
    { "epoch": 0.22, "grad_norm": 0.34837890747063266, "learning_rate": 0.00019108792242938425, "loss": 0.6903, "step": 865 },
    { "epoch": 0.22, "grad_norm": 0.28304812900111975, "learning_rate": 0.0001909022250166365, "loss": 0.6982, "step": 870 },
    { "epoch": 0.22, "grad_norm": 0.2123369848990679, "learning_rate": 0.00019071470499127058, "loss": 0.6929, "step": 875 },
    { "epoch": 0.23, "grad_norm": 0.2836396726336964, "learning_rate": 0.00019052536611311046, "loss": 0.7434, "step": 880 },
    { "epoch": 0.23, "grad_norm": 0.2247103209214737, "learning_rate": 0.0001903342121784486, "loss": 0.6816, "step": 885 },
    { "epoch": 0.23, "grad_norm": 0.3242567099018861, "learning_rate": 0.00019014124701996973, "loss": 0.72, "step": 890 },
    { "epoch": 0.23, "grad_norm": 0.2339879402133849, "learning_rate": 0.00018994647450667413, "loss": 0.7041, "step": 895 },
    { "epoch": 0.23, "grad_norm": 0.22751659387372616, "learning_rate": 0.00018974989854379996, "loss": 0.7511, "step": 900 },
    { "epoch": 0.23, "grad_norm": 0.26549639698173727, "learning_rate": 0.00018955152307274507, "loss": 0.6897, "step": 905 },
    { "epoch": 0.23, "grad_norm": 0.21860533117362732, "learning_rate": 0.00018935135207098785, "loss": 0.6648, "step": 910 },
    { "epoch": 0.23, "grad_norm": 0.2533868012784127, "learning_rate": 0.00018914938955200754, "loss": 0.6798, "step": 915 },
    { "epoch": 0.24, "grad_norm": 0.24616813985140945, "learning_rate": 0.00018894563956520374, "loss": 0.7683, "step": 920 },
    { "epoch": 0.24, "grad_norm": 0.2668291016708883, "learning_rate": 0.0001887401061958153, "loss": 0.7791, "step": 925 },
    { "epoch": 0.24, "grad_norm": 0.2556952354899014, "learning_rate": 0.00018853279356483826, "loss": 0.7733, "step": 930 },
    { "epoch": 0.24, "grad_norm": 0.2824190700646869, "learning_rate": 0.00018832370582894334, "loss": 0.6439, "step": 935 },
    { "epoch": 0.24, "grad_norm": 0.3193420311338137, "learning_rate": 0.00018811284718039256, "loss": 0.6791, "step": 940 },
    { "epoch": 0.24, "grad_norm": 0.25742868616486125, "learning_rate": 0.00018790022184695523, "loss": 0.6835, "step": 945 },
    { "epoch": 0.24, "grad_norm": 0.31407801057679396, "learning_rate": 0.00018768583409182305, "loss": 0.7338, "step": 950 },
    { "epoch": 0.24, "grad_norm": 0.2268676531535162, "learning_rate": 0.00018746968821352483, "loss": 0.74, "step": 955 },
    { "epoch": 0.25, "grad_norm": 0.2706082597185793, "learning_rate": 0.00018725178854584007, "loss": 0.7372, "step": 960 },
    { "epoch": 0.25, "grad_norm": 0.23942119358366734, "learning_rate": 0.00018703213945771229, "loss": 0.7486, "step": 965 },
    { "epoch": 0.25, "grad_norm": 0.23415626163520642, "learning_rate": 0.00018681074535316125, "loss": 0.8149, "step": 970 },
    { "epoch": 0.25, "grad_norm": 0.23625273115645273, "learning_rate": 0.00018658761067119484, "loss": 0.6954, "step": 975 },
    { "epoch": 0.25, "grad_norm": 0.29616413603025993, "learning_rate": 0.00018636273988571991, "loss": 0.6825, "step": 980 },
    { "epoch": 0.25, "grad_norm": 0.24204431321093076, "learning_rate": 0.00018613613750545255, "loss": 0.6402, "step": 985 },
    { "epoch": 0.25, "grad_norm": 0.23843536272675503, "learning_rate": 0.0001859078080738279, "loss": 0.7034, "step": 990 },
    { "epoch": 0.26, "grad_norm": 0.2422607510213604, "learning_rate": 0.00018567775616890888, "loss": 0.7258, "step": 995 },
    { "epoch": 0.26, "grad_norm": 0.2367697702106968, "learning_rate": 0.00018544598640329432, "loss": 0.7125, "step": 1000 },
    { "epoch": 0.26, "grad_norm": 0.21770302111684567, "learning_rate": 0.00018521250342402672, "loss": 0.7651, "step": 1005 },
    { "epoch": 0.26, "grad_norm": 0.19822970981192417, "learning_rate": 0.00018497731191249894, "loss": 0.7705, "step": 1010 },
    { "epoch": 0.26, "grad_norm": 0.23080773465076526, "learning_rate": 0.00018474041658436027, "loss": 0.7721, "step": 1015 },
    { "epoch": 0.26, "grad_norm": 0.22821163739265107, "learning_rate": 0.000184501822189422, "loss": 0.7324, "step": 1020 },
    { "epoch": 0.26, "grad_norm": 0.22804940526863002, "learning_rate": 0.0001842615335115621, "loss": 0.7121, "step": 1025 },
    { "epoch": 0.26, "grad_norm": 0.2703519880608756, "learning_rate": 0.00018401955536862948, "loss": 0.8063, "step": 1030 },
    { "epoch": 0.27, "grad_norm": 0.27550361128710155, "learning_rate": 0.00018377589261234705, "loss": 0.7164, "step": 1035 },
    { "epoch": 0.27, "grad_norm": 0.19381814425434957, "learning_rate": 0.0001835305501282148, "loss": 0.8273, "step": 1040 },
    { "epoch": 0.27, "grad_norm": 0.26340620883694543, "learning_rate": 0.00018328353283541158, "loss": 0.7239, "step": 1045 },
    { "epoch": 0.27, "grad_norm": 0.2620607615738064, "learning_rate": 0.00018303484568669667, "loss": 0.6904, "step": 1050 },
    { "epoch": 0.27, "grad_norm": 0.15029608182232604, "learning_rate": 0.00018278449366831035, "loss": 0.6003, "step": 1055 },
    { "epoch": 0.27, "grad_norm": 0.2187384668280594, "learning_rate": 0.00018253248179987388, "loss": 0.707, "step": 1060 },
    { "epoch": 0.27, "grad_norm": 0.2560638257516859, "learning_rate": 0.00018227881513428908, "loss": 0.7375, "step": 1065 },
    { "epoch": 0.27, "grad_norm": 0.26865842108524934, "learning_rate": 0.0001820234987576368, "loss": 0.7077, "step": 1070 },
    { "epoch": 0.28, "grad_norm": 0.24178826048580734, "learning_rate": 0.00018176653778907492, "loss": 0.7354, "step": 1075 },
    { "epoch": 0.28, "grad_norm": 0.27854735242372025, "learning_rate": 0.00018150793738073602, "loss": 0.6523, "step": 1080 },
    { "epoch": 0.28, "grad_norm": 0.28792834941972884, "learning_rate": 0.00018124770271762364, "loss": 0.6969, "step": 1085 },
    { "epoch": 0.28, "grad_norm": 0.30573100271052395, "learning_rate": 0.00018098583901750867, "loss": 0.7093, "step": 1090 },
    { "epoch": 0.28, "grad_norm": 0.35336463231783916, "learning_rate": 0.00018072235153082455, "loss": 0.6441, "step": 1095 },
    { "epoch": 0.28, "grad_norm": 0.23753636938939662, "learning_rate": 0.00018045724554056214, "loss": 0.7617, "step": 1100 },
    { "epoch": 0.28, "grad_norm": 0.2830346076575104, "learning_rate": 0.0001801905263621636, "loss": 0.7642, "step": 1105 },
    { "epoch": 0.28, "grad_norm": 0.2431998404354548, "learning_rate": 0.0001799221993434159, "loss": 0.7183, "step": 1110 },
    { "epoch": 0.29, "grad_norm": 0.19371637012307855, "learning_rate": 0.00017965226986434377, "loss": 0.6834, "step": 1115 },
    { "epoch": 0.29, "grad_norm": 0.371529742091536, "learning_rate": 0.00017938074333710157, "loss": 0.7232, "step": 1120 },
    { "epoch": 0.29, "grad_norm": 0.26377679420092087, "learning_rate": 0.00017910762520586485, "loss": 0.7247, "step": 1125 },
    { "epoch": 0.29, "grad_norm": 0.2285136389359427, "learning_rate": 0.00017883292094672128, "loss": 0.7205, "step": 1130 },
    { "epoch": 0.29, "grad_norm": 0.29796654170976006, "learning_rate": 0.00017855663606756078, "loss": 0.6848, "step": 1135 },
    { "epoch": 0.29, "grad_norm": 0.2046721683597502, "learning_rate": 0.00017827877610796514, "loss": 0.6759, "step": 1140 },
    { "epoch": 0.29, "grad_norm": 0.3268765805116052, "learning_rate": 0.00017799934663909682, "loss": 0.716, "step": 1145 },
    { "epoch": 0.3, "grad_norm": 0.285963694250622, "learning_rate": 0.00017771835326358743, "loss": 0.6486, "step": 1150 },
    { "epoch": 0.3, "grad_norm": 0.2520717807255334, "learning_rate": 0.00017743580161542525, "loss": 0.6561, "step": 1155 },
    { "epoch": 0.3, "grad_norm": 0.1981002914068182, "learning_rate": 0.00017715169735984233, "loss": 0.6599, "step": 1160 },
    { "epoch": 0.3, "grad_norm": 0.26598319836843726, "learning_rate": 0.00017686604619320093, "loss": 0.7375, "step": 1165 },
    { "epoch": 0.3, "grad_norm": 0.26235482801170973, "learning_rate": 0.0001765788538428792, "loss": 0.8063, "step": 1170 },
    { "epoch": 0.3, "grad_norm": 0.3377576354866203, "learning_rate": 0.00017629012606715648, "loss": 0.7493, "step": 1175 },
    { "epoch": 0.3, "grad_norm": 0.3678264436119463, "learning_rate": 0.00017599986865509767, "loss": 0.6989, "step": 1180 },
    { "epoch": 0.3, "grad_norm": 0.32747709902543076, "learning_rate": 0.00017570808742643746, "loss": 0.7948, "step": 1185 },
    { "epoch": 0.31, "grad_norm": 0.24912874308295901, "learning_rate": 0.00017541478823146327, "loss": 0.7187, "step": 1190 },
    { "epoch": 0.31, "grad_norm": 0.2353274982649047, "learning_rate": 0.00017511997695089822, "loss": 0.7232, "step": 1195 },
    { "epoch": 0.31, "grad_norm": 0.2537606981534416, "learning_rate": 0.00017482365949578302, "loss": 0.6607, "step": 1200 },
    { "epoch": 0.31, "grad_norm": 0.2372984581891708, "learning_rate": 0.00017452584180735774, "loss": 0.732, "step": 1205 },
    { "epoch": 0.31, "grad_norm": 0.23094752394219156, "learning_rate": 0.00017422652985694237, "loss": 0.7065, "step": 1210 },
    { "epoch": 0.31, "grad_norm": 0.21971428715056102, "learning_rate": 0.00017392572964581725, "loss": 0.7012, "step": 1215 },
    { "epoch": 0.31, "grad_norm": 0.21591121328899532, "learning_rate": 0.00017362344720510278, "loss": 0.7083, "step": 1220 },
    { "epoch": 0.31, "grad_norm": 0.2779472292134201, "learning_rate": 0.00017331968859563834, "loss": 0.7777, "step": 1225 },
    { "epoch": 0.32, "grad_norm": 0.37145422903296105, "learning_rate": 0.00017301445990786102, "loss": 0.7592, "step": 1230 },
    { "epoch": 0.32, "grad_norm": 0.2838257377376314, "learning_rate": 0.00017270776726168317, "loss": 0.7147, "step": 1235 },
    { "epoch": 0.32, "grad_norm": 0.24195971773139494, "learning_rate": 0.00017239961680637, "loss": 0.7442, "step": 1240 },
    { "epoch": 0.32, "grad_norm": 0.2531071173762594, "learning_rate": 0.00017209001472041617, "loss": 0.7964, "step": 1245 },
    { "epoch": 0.32, "grad_norm": 0.2392782350831628, "learning_rate": 0.0001717789672114218, "loss": 0.7626, "step": 1250 },
    { "epoch": 0.32, "grad_norm": 0.24505477687731997, "learning_rate": 0.00017146648051596822, "loss": 0.7101, "step": 1255 },
    { "epoch": 0.32, "grad_norm": 0.27488228336035475, "learning_rate": 0.0001711525608994927, "loss": 0.6718, "step": 1260 },
    { "epoch": 0.32, "grad_norm": 0.1916616192257934, "learning_rate": 0.00017083721465616306, "loss": 0.6653, "step": 1265 },
    { "epoch": 0.33, "grad_norm": 0.2960082493036361, "learning_rate": 0.00017052044810875126, "loss": 0.8172, "step": 1270 },
    { "epoch": 0.33, "grad_norm": 0.29672233161501615, "learning_rate": 0.00017020226760850677, "loss": 0.695, "step": 1275 },
    { "epoch": 0.33, "grad_norm": 0.29971905663286563, "learning_rate": 0.00016988267953502913, "loss": 0.6921, "step": 1280 },
    { "epoch": 0.33, "grad_norm": 0.1972856751409119, "learning_rate": 0.0001695616902961401, "loss": 0.6707, "step": 1285 },
    { "epoch": 0.33, "grad_norm": 0.24592016529147168, "learning_rate": 0.00016923930632775516, "loss": 0.7049, "step": 1290 },
    { "epoch": 0.33, "grad_norm": 0.2362085501756166, "learning_rate": 0.00016891553409375444, "loss": 0.7222, "step": 1295 },
    { "epoch": 0.33, "grad_norm": 0.2720995150566844, "learning_rate": 0.00016859038008585326, "loss": 0.7355, "step": 1300 },
    { "epoch": 0.33, "grad_norm": 0.31383124999534473, "learning_rate": 0.0001682638508234717, "loss": 0.6735, "step": 1305 },
    { "epoch": 0.34, "grad_norm": 0.19810610550474098, "learning_rate": 0.0001679359528536041, "loss": 0.6522, "step": 1310 },
    { "epoch": 0.34, "grad_norm": 0.31481804318721185, "learning_rate": 0.0001676066927506878, "loss": 0.7391, "step": 1315 },
    { "epoch": 0.34, "grad_norm": 0.27086869426476023, "learning_rate": 0.00016727607711647114, "loss": 0.6897, "step": 1320 },
    { "epoch": 0.34, "grad_norm": 0.22378657478817104, "learning_rate": 0.00016694411257988135, "loss": 0.6582, "step": 1325 },
    { "epoch": 0.34, "grad_norm": 0.2277896285249334, "learning_rate": 0.00016661080579689132, "loss": 0.7067, "step": 1330 },
    { "epoch": 0.34, "grad_norm": 0.24128076526967204, "learning_rate": 0.00016627616345038642, "loss": 0.6632, "step": 1335 },
    { "epoch": 0.34, "grad_norm": 0.425397202711577, "learning_rate": 0.0001659401922500304, "loss": 0.7114, "step": 1340 },
    { "epoch": 0.35, "grad_norm": 0.2401567805733719, "learning_rate": 0.0001656028989321309, "loss": 0.6438, "step": 1345 },
    { "epoch": 0.35, "grad_norm": 0.2153720205033084, "learning_rate": 0.00016526429025950424, "loss": 0.6838, "step": 1350 },
    { "epoch": 0.35, "grad_norm": 0.22736402630025615, "learning_rate": 0.00016492437302134008, "loss": 0.7221, "step": 1355 },
    { "epoch": 0.35, "grad_norm": 0.23982260160339397, "learning_rate": 0.00016458315403306502, "loss": 0.7908, "step": 1360 },
    { "epoch": 0.35, "grad_norm": 0.27541369809262867, "learning_rate": 0.00016424064013620615, "loss": 0.6968, "step": 1365 },
    { "epoch": 0.35, "grad_norm": 0.2658031068926381, "learning_rate": 0.0001638968381982538, "loss": 0.6988, "step": 1370 },
    { "epoch": 0.35, "grad_norm": 0.22486989496520882, "learning_rate": 0.0001635517551125238, "loss": 0.81, "step": 1375 },
    { "epoch": 0.35, "grad_norm": 0.30674338452959843, "learning_rate": 0.0001632053977980194, "loss": 0.7256, "step": 1380 },
    { "epoch": 0.36, "grad_norm": 0.30285588055303475, "learning_rate": 0.0001628577731992924, "loss": 0.7188, "step": 1385 },
    { "epoch": 0.36, "grad_norm": 0.36884764888792976, "learning_rate": 0.000162508888286304, "loss": 0.7081, "step": 1390 },
    { "epoch": 0.36, "grad_norm": 0.2400495764148457, "learning_rate": 0.00016215875005428499, "loss": 0.7678, "step": 1395 },
    { "epoch": 0.36, "grad_norm": 0.20873038269420108, "learning_rate": 0.00016180736552359553, "loss": 0.7497, "step": 1400 },
    { "epoch": 0.36, "grad_norm": 0.2457699326637798, "learning_rate": 0.0001614547417395844, "loss": 0.7734, "step": 1405 },
    { "epoch": 0.36, "grad_norm": 0.31218574475793187, "learning_rate": 0.00016110088577244773, "loss": 0.7162, "step": 1410 },
    { "epoch": 0.36, "grad_norm": 0.27454082866628093, "learning_rate": 0.0001607458047170872, "loss": 0.745, "step": 1415 },
    { "epoch": 0.36, "grad_norm": 0.28758887844401904, "learning_rate": 0.00016038950569296785, "loss": 0.6231, "step": 1420 },
    { "epoch": 0.37, "grad_norm": 0.21604728751635988, "learning_rate": 0.00016003199584397528, "loss": 0.8008, "step": 1425 },
    { "epoch": 0.37, "grad_norm": 0.23310840527224116, "learning_rate": 0.00015967328233827249, "loss": 0.6987, "step": 1430 },
    { "epoch": 0.37, "grad_norm": 0.2568649534565718, "learning_rate": 0.000159313372368156, "loss": 0.6466, "step": 1435 },
    { "epoch": 0.37, "grad_norm": 0.32113279474719314, "learning_rate": 0.00015895227314991178, "loss": 0.6838, "step": 1440 },
    { "epoch": 0.37, "grad_norm": 0.296258785686713, "learning_rate": 0.0001585899919236706, "loss": 0.6935, "step": 1445 },
    { "epoch": 0.37, "grad_norm": 0.23501470975504843, "learning_rate": 0.00015822653595326275, "loss": 0.7261, "step": 1450 },
    { "epoch": 0.37, "grad_norm": 0.2783256304635712, "learning_rate": 0.0001578619125260724, "loss": 0.7512, "step": 1455 },
    { "epoch": 0.37, "grad_norm": 0.268618761910228, "learning_rate": 0.00015749612895289152, "loss": 0.6482, "step": 1460 },
    { "epoch": 0.38, "grad_norm": 0.21978849504732878, "learning_rate": 0.00015712919256777335, "loss": 0.7327, "step": 1465 },
    { "epoch": 0.38, "grad_norm": 0.23357753091278965, "learning_rate": 0.00015676111072788527, "loss": 0.681, "step": 1470 },
    { "epoch": 0.38, "grad_norm": 0.271489123229419, "learning_rate": 0.0001563918908133614, "loss": 0.7193, "step": 1475 },
    { "epoch": 0.38, "grad_norm": 0.31414437765976133, "learning_rate": 0.00015602154022715435, "loss": 0.7501, "step": 1480 },
    { "epoch": 0.38, "grad_norm": 0.27016203325071253, "learning_rate": 0.00015565006639488722, "loss": 0.6972, "step": 1485 },
    { "epoch": 0.38, "grad_norm": 0.22532821311440346, "learning_rate": 0.0001552774767647043, "loss": 0.6922, "step": 1490 },
    { "epoch": 0.38, "grad_norm": 0.27403456862921993, "learning_rate": 0.000154903778807122, "loss": 0.6949, "step": 1495 },
    { "epoch": 0.38, "grad_norm": 0.27739513562892554, "learning_rate": 0.0001545289800148789, "loss": 0.7243, "step": 1500 },
    { "epoch": 0.39, "grad_norm": 0.2238572112501968, "learning_rate": 0.00015415308790278572, "loss": 0.7353, "step": 1505 },
    { "epoch": 0.39, "grad_norm": 0.24964811413280275, "learning_rate": 0.0001537761100075744, "loss": 0.7345, "step": 1510 },
    { "epoch": 0.39, "grad_norm": 0.31756535201065944, "learning_rate": 0.00015339805388774714, "loss": 0.7856, "step": 1515 },
    { "epoch": 0.39, "grad_norm": 0.23296901823074231, "learning_rate": 0.00015301892712342482, "loss": 0.729, "step": 1520 },
    { "epoch": 0.39, "grad_norm": 0.2352427696844548, "learning_rate": 0.00015263873731619508, "loss": 0.7662, "step": 1525 },
    { "epoch": 0.39, "grad_norm": 0.2761737699751625, "learning_rate": 0.00015225749208895968, "loss": 0.7204, "step": 1530 },
    { "epoch": 0.39, "grad_norm": 0.3417520503815876, "learning_rate": 0.000151875199085782, "loss": 0.7737, "step": 1535 },
    { "epoch": 0.4, "grad_norm": 0.2668391012505777, "learning_rate": 0.0001514918659717335, "loss": 0.7105, "step": 1540 },
    { "epoch": 0.4, "grad_norm": 0.30187672878563326, "learning_rate": 0.00015110750043274008, "loss": 0.7326, "step": 1545 },
    { "epoch": 0.4, "grad_norm": 0.2242593810622975, "learning_rate": 0.00015072211017542813, "loss": 0.7472, "step": 1550 },
    { "epoch": 0.4, "grad_norm": 0.29972249141011964, "learning_rate": 0.0001503357029269698, "loss": 0.6271, "step": 1555 },
    { "epoch": 0.4, "grad_norm": 0.26475447856915474, "learning_rate": 0.00014994828643492827, "loss": 0.7354, "step": 1560 },
    { "epoch": 0.4, "grad_norm": 0.26450457112724596, "learning_rate": 0.00014955986846710222, "loss": 0.7264, "step": 1565 },
    { "epoch": 0.4, "grad_norm": 0.3219325804352173, "learning_rate": 0.00014917045681137026, "loss": 0.689, "step": 1570 },
    { "epoch": 0.4, "grad_norm": 0.26984976027685303, "learning_rate": 0.00014878005927553456, "loss": 0.6566, "step": 1575 },
    { "epoch": 0.41, "grad_norm": 0.31512358131320956, "learning_rate": 0.0001483886836871646, "loss": 0.7099, "step": 1580 },
    { "epoch": 0.41, "grad_norm": 0.27177868856475285, "learning_rate": 0.00014799633789343994, "loss": 0.7864, "step": 1585 },
    { "epoch": 0.41, "grad_norm": 0.2905196107184957, "learning_rate": 0.00014760302976099304, "loss": 0.6429, "step": 1590 },
    { "epoch": 0.41, "grad_norm": 0.2429940903609081, "learning_rate": 0.00014720876717575155, "loss": 0.6899, "step": 1595 },
    { "epoch": 0.41, "grad_norm": 0.29122627651097244, "learning_rate": 0.00014681355804278001, "loss": 0.6956, "step": 1600 },
    { "epoch": 0.41, "grad_norm": 0.214312815706962, "learning_rate": 0.00014641741028612162, "loss": 0.641, "step": 1605 },
    { "epoch": 0.41, "grad_norm": 0.1858785757380131, "learning_rate": 0.00014602033184863913, "loss": 0.7175, "step": 1610 },
    { "epoch": 0.41, "grad_norm": 0.2755921853014141, "learning_rate": 0.00014562233069185572, "loss": 0.8379, "step": 1615 },
    { "epoch": 0.42, "grad_norm": 0.25002568814104187, "learning_rate": 0.00014522341479579533, "loss": 0.7584, "step": 1620 },
    { "epoch": 0.42, "grad_norm": 0.257559638932229, "learning_rate": 0.0001448235921588226, "loss": 0.7739, "step": 1625 },
    { "epoch": 0.42, "grad_norm": 0.20960264146716834, "learning_rate": 0.00014442287079748263, "loss": 0.7014, "step": 1630 },
    { "epoch": 0.42, "grad_norm": 0.2521461373853711, "learning_rate": 0.00014402125874634012, "loss": 0.6859, "step": 1635 },
    { "epoch": 0.42, "grad_norm": 0.249737216302013, "learning_rate": 0.00014361876405781832, "loss": 0.7109, "step": 1640 },
    { "epoch": 0.42, "grad_norm": 0.2126565997638845, "learning_rate": 0.00014321539480203764, "loss": 0.7759, "step": 1645 },
    { "epoch": 0.42, "grad_norm": 0.32895767720771935, "learning_rate": 0.00014281115906665374, "loss": 0.7, "step": 1650 },
    { "epoch": 0.42, "grad_norm": 0.24271085601266817, "learning_rate": 0.00014240606495669538, "loss": 0.6983, "step": 1655 },
    { "epoch": 0.43, "grad_norm": 0.3030757588062575, "learning_rate": 0.00014200012059440207, "loss": 0.6991, "step": 1660 },
    { "epoch": 0.43, "grad_norm": 0.37464984671186286, "learning_rate": 0.00014159333411906095, "loss": 0.7526, "step": 1665 },
    { "epoch": 0.43, "grad_norm": 0.23935406631920386, "learning_rate": 0.00014118571368684383, "loss": 0.7923, "step": 1670 },
    { "epoch": 0.43, "grad_norm": 0.296146472400592, "learning_rate": 0.00014077726747064353, "loss": 0.6202, "step": 1675 },
    { "epoch": 0.43, "grad_norm": 0.23565535010314678, "learning_rate": 0.00014036800365991008, "loss": 0.6796, "step": 1680 },
    { "epoch": 0.43, "grad_norm": 0.2660771376040614, "learning_rate": 0.00013995793046048643, "loss": 0.6981, "step": 1685 },
    { "epoch": 0.43, "grad_norm": 0.264009781126536, "learning_rate": 0.00013954705609444404, "loss": 0.6901, "step": 1690 },
    { "epoch": 0.43, "grad_norm": 0.28853297225085933, "learning_rate": 0.0001391353887999179, "loss": 0.7664, "step": 1695 },
    { "epoch": 0.44, "grad_norm": 0.24731691813618162, "learning_rate": 0.00013872293683094152, "loss": 0.6689, "step": 1700 },
    { "epoch": 0.44, "grad_norm": 0.2594833661555708, "learning_rate": 0.00013830970845728126, "loss": 0.7701, "step": 1705 },
    { "epoch": 0.44, "grad_norm": 0.20644246333852043, "learning_rate": 0.00013789571196427055, "loss": 0.6513, "step": 1710 },
    { "epoch": 0.44, "grad_norm": 0.2339458014501443, "learning_rate": 0.00013748095565264384, "loss": 0.6494, "step": 1715 },
    { "epoch": 0.44, "grad_norm": 0.2961333190555473, "learning_rate": 0.00013706544783837022, "loss": 0.7054, "step": 1720 },
    { "epoch": 0.44, "grad_norm": 0.27743496626408143, "learning_rate": 0.00013664919685248649, "loss": 0.7295, "step": 1725 },
    { "epoch": 0.44, "grad_norm": 0.27257332981945664, "learning_rate": 0.00013623221104093025, "loss": 0.8299, "step": 1730 },
    { "epoch": 0.45, "grad_norm": 0.2592802724543621, "learning_rate": 0.0001358144987643726, "loss": 0.7277, "step": 1735 },
    { "epoch": 0.45, "grad_norm": 0.2238450594222949, "learning_rate": 0.00013539606839805036, "loss": 0.7238, "step": 1740 },
    { "epoch": 0.45, "grad_norm": 0.2735424814559735, "learning_rate": 0.00013497692833159828, "loss": 0.7015, "step": 1745 },
    { "epoch": 0.45, "grad_norm": 0.2332151330743781, "learning_rate": 0.00013455708696888085, "loss": 0.6969, "step": 1750 },
    { "epoch": 0.45, "grad_norm": 0.23230692211371998, "learning_rate": 0.00013413655272782356, "loss": 0.7266, "step": 1755 },
    { "epoch": 0.45, "grad_norm": 0.23959034038420193, "learning_rate": 0.00013371533404024438, "loss": 0.7636, "step": 1760 },
    { "epoch": 0.45, "grad_norm": 0.25479290452971226, "learning_rate": 0.00013329343935168466, "loss": 0.7075, "step": 1765 },
    { "epoch": 0.45, "grad_norm": 0.23415173017919827, "learning_rate": 0.00013287087712123962, "loss": 0.7747, "step": 1770 },
    { "epoch": 0.46, "grad_norm": 0.2286511684566787, "learning_rate": 0.000132447655821389, "loss": 0.6613, "step": 1775 },
    { "epoch": 0.46, "grad_norm": 0.2976728303873654, "learning_rate": 0.00013202378393782692, "loss": 0.8001, "step": 1780 },
    { "epoch": 0.46, "grad_norm": 0.26302357314609043, "learning_rate": 0.00013159926996929192, "loss": 0.6637, "step": 1785 },
    { "epoch": 0.46, "grad_norm": 0.25180945871071514, "learning_rate": 0.00013117412242739655, "loss": 0.6515, "step": 1790 },
    { "epoch": 0.46, "grad_norm": 0.22843461927288725, "learning_rate": 0.00013074834983645657, "loss": 0.7534, "step": 1795 },
    { "epoch": 0.46, "grad_norm": 0.2746800608584564, "learning_rate": 0.00013032196073332027, "loss": 0.7448, "step": 1800 },
    { "epoch": 0.46, "grad_norm": 0.23358914777160025, "learning_rate": 0.000129894963667197, "loss": 0.7462, "step": 1805 },
    { "epoch": 0.46, "grad_norm": 0.21563694009001097, "learning_rate": 0.00012946736719948607, "loss": 0.7174, "step": 1810 },
    { "epoch": 0.47, "grad_norm": 0.25466538845306436, "learning_rate": 0.00012903917990360485, "loss": 0.6958, "step": 1815 },
    { "epoch": 0.47, "grad_norm": 0.23650763866157107, "learning_rate": 0.000128610410364817, "loss": 0.6948, "step": 1820 },
    { "epoch": 0.47, "grad_norm": 0.32664640496556885, "learning_rate": 0.00012818106718006023, "loss": 0.6132, "step": 1825 },
    { "epoch": 0.47, "grad_norm": 0.2195897657124799, "learning_rate": 0.00012775115895777417, "loss": 0.7536, "step": 1830 },
    { "epoch": 0.47, "grad_norm": 0.23682325415599154, "learning_rate": 0.0001273206943177274, "loss": 0.6292, "step": 1835 },
    { "epoch": 0.47, "grad_norm": 0.2708829289481542, "learning_rate": 0.00012688968189084493, "loss": 0.7273, "step": 1840 },
    { "epoch": 0.47, "grad_norm": 0.26405392878632394, "learning_rate": 0.000126458130319035, "loss": 0.7254, "step": 1845 },
    { "epoch": 0.47, "grad_norm": 0.2710846037228565, "learning_rate": 0.00012602604825501587, "loss": 0.7051, "step": 1850 },
    { "epoch": 0.48, "grad_norm": 0.18625479643676604, "learning_rate": 0.00012559344436214223, "loss": 0.634, "step": 1855 },
    { "epoch": 0.48, "grad_norm": 0.23174335299007431, "learning_rate": 0.00012516032731423165, "loss": 0.721, "step": 1860 },
    { "epoch": 0.48, "grad_norm": 0.2674954048671644, "learning_rate": 0.00012472670579539055, "loss": 0.7039, "step": 1865 },
    { "epoch": 0.48, "grad_norm": 0.23350249784430052, "learning_rate": 0.00012429258849984014, "loss": 0.7119, "step": 1870 },
    { "epoch": 0.48, "grad_norm": 0.2307966491739631, "learning_rate": 0.00012385798413174206, "loss": 0.7267, "step": 1875 },
    { "epoch": 0.48, "grad_norm": 0.2587108210616084, "learning_rate": 0.00012342290140502388, "loss": 0.7738, "step": 1880 },
    { "epoch": 0.48, "grad_norm": 0.22704947603538148, "learning_rate": 0.00012298734904320438, "loss": 0.6771, "step": 1885 },
    { "epoch": 0.48, "grad_norm": 0.24005242145344702, "learning_rate": 0.00012255133577921868, "loss": 0.7774, "step": 1890 },
    { "epoch": 0.49, "grad_norm": 0.21236521326266408, "learning_rate": 0.00012211487035524305, "loss": 0.689, "step": 1895 },
    { "epoch": 0.49, "grad_norm": 0.26548226590256707, "learning_rate": 0.0001216779615225197, "loss": 0.6958, "step": 1900 },
    { "epoch": 0.49, "grad_norm": 0.28092405712871943, "learning_rate": 0.00012124061804118137, "loss": 0.7061, "step": 1905 },
    { "epoch": 0.49, "grad_norm": 0.24730560435280138, "learning_rate": 0.00012080284868007541, "loss": 0.6964, "step": 1910 },
    { "epoch": 0.49, "grad_norm": 0.1991516776169903, "learning_rate": 0.00012036466221658847, "loss": 0.72, "step": 1915 },
    { "epoch": 0.49, "grad_norm": 0.29645767275685886, "learning_rate": 0.0001199260674364699, "loss": 0.7778, "step": 1920 },
    { "epoch": 0.49, "grad_norm": 0.25070174095345915, "learning_rate": 0.00011948707313365614, "loss": 0.8114, "step": 1925 },
    { "epoch": 0.5, "grad_norm": 0.27676560069954154, "learning_rate": 0.00011904768811009405, "loss": 0.7192, "step": 1930 },
    { "epoch": 0.5, "grad_norm": 0.3078376640692026, "learning_rate": 0.00011860792117556454, "loss": 0.709, "step": 1935 },
    { "epoch": 0.5, "grad_norm": 0.2049781937326678, "learning_rate": 0.00011816778114750593, "loss": 0.7672, "step": 1940 },
    { "epoch": 0.5, "grad_norm": 0.21305453207892233, "learning_rate": 0.00011772727685083724, "loss": 0.7744, "step": 1945 },
    { "epoch": 0.5, "grad_norm": 0.24046462080108547, "learning_rate": 0.00011728641711778103, "loss": 0.7229, "step": 1950 },
    { "epoch": 0.5, "grad_norm": 0.21072752361715988, "learning_rate": 0.00011684521078768658, "loss": 0.6853, "step": 1955 },
    { "epoch": 0.5, "grad_norm": 0.17170386164857987, "learning_rate": 0.00011640366670685248, "loss": 0.6348, "step": 1960 },
    { "epoch": 0.5, "grad_norm": 0.27104402407445916, "learning_rate": 0.00011596179372834924, "loss": 0.8441, "step": 1965 },
    { "epoch": 0.51, "grad_norm": 0.24600997987590698, "learning_rate": 0.00011551960071184195, "loss": 0.8251, "step": 1970 },
    { "epoch": 0.51, "grad_norm": 0.22967416303582927, "learning_rate": 0.00011507709652341256, "loss": 0.716, "step": 1975 },
    { "epoch": 0.51, "grad_norm": 0.24788639781264693, "learning_rate": 0.00011463429003538196, "loss": 0.7307, "step": 1980 },
    { "epoch": 0.51, "grad_norm": 0.25315558172885294, "learning_rate": 0.00011419119012613233, "loss": 0.7196, "step": 1985 },
    { "epoch": 0.51, "grad_norm": 0.24182617769996623, "learning_rate": 0.000113747805679929, "loss": 0.8012, "step": 1990 },
    { "epoch": 0.51, "grad_norm": 0.2500833847981834, "learning_rate": 0.00011330414558674234, "loss": 0.7113, "step": 1995 },
    { "epoch": 0.51, "grad_norm": 0.3651534404891642,
|
"learning_rate": 0.00011286021874206952, |
|
"loss": 0.6693, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"grad_norm": 0.3146361713006211, |
|
"learning_rate": 0.00011241603404675609, |
|
"loss": 0.7867, |
|
"step": 2005 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.2994271238230683, |
|
"learning_rate": 0.00011197160040681762, |
|
"loss": 0.8188, |
|
"step": 2010 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.2287392648112236, |
|
"learning_rate": 0.00011152692673326111, |
|
"loss": 0.7414, |
|
"step": 2015 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.21561987456802226, |
|
"learning_rate": 0.0001110820219419062, |
|
"loss": 0.743, |
|
"step": 2020 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.26286058284503494, |
|
"learning_rate": 0.00011063689495320661, |
|
"loss": 0.709, |
|
"step": 2025 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.23831373981952758, |
|
"learning_rate": 0.0001101915546920711, |
|
"loss": 0.7487, |
|
"step": 2030 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.4169631396437136, |
|
"learning_rate": 0.00010974601008768461, |
|
"loss": 0.6595, |
|
"step": 2035 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.23258437702962542, |
|
"learning_rate": 0.00010930027007332923, |
|
"loss": 0.6659, |
|
"step": 2040 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"grad_norm": 0.23047360031102437, |
|
"learning_rate": 0.00010885434358620507, |
|
"loss": 0.7374, |
|
"step": 2045 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.2586140902144001, |
|
"learning_rate": 0.00010840823956725103, |
|
"loss": 0.7431, |
|
"step": 2050 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.2700901236204115, |
|
"learning_rate": 0.00010796196696096564, |
|
"loss": 0.7076, |
|
"step": 2055 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.20102755645586845, |
|
"learning_rate": 0.00010751553471522757, |
|
"loss": 0.7332, |
|
"step": 2060 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.25286502232358293, |
|
"learning_rate": 0.00010706895178111634, |
|
"loss": 0.7297, |
|
"step": 2065 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.2664571423670369, |
|
"learning_rate": 0.00010662222711273279, |
|
"loss": 0.6364, |
|
"step": 2070 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.22487420863698854, |
|
"learning_rate": 0.00010617536966701962, |
|
"loss": 0.7288, |
|
"step": 2075 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.24184706211280177, |
|
"learning_rate": 0.00010572838840358168, |
|
"loss": 0.6334, |
|
"step": 2080 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"grad_norm": 0.26180250015553935, |
|
"learning_rate": 0.00010528129228450645, |
|
"loss": 0.6579, |
|
"step": 2085 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.24611552837710818, |
|
"learning_rate": 0.00010483409027418425, |
|
"loss": 0.7506, |
|
"step": 2090 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.22771528096688773, |
|
"learning_rate": 0.00010438679133912858, |
|
"loss": 0.6517, |
|
"step": 2095 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.25425984413622493, |
|
"learning_rate": 0.00010393940444779635, |
|
"loss": 0.6997, |
|
"step": 2100 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.25948687177354873, |
|
"learning_rate": 0.00010349193857040787, |
|
"loss": 0.7736, |
|
"step": 2105 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.29503492872852033, |
|
"learning_rate": 0.00010304440267876727, |
|
"loss": 0.7855, |
|
"step": 2110 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.21574528692226247, |
|
"learning_rate": 0.00010259680574608248, |
|
"loss": 0.765, |
|
"step": 2115 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"grad_norm": 0.2530425727116828, |
|
"learning_rate": 0.00010214915674678523, |
|
"loss": 0.7705, |
|
"step": 2120 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.2355690254358619, |
|
"learning_rate": 0.00010170146465635126, |
|
"loss": 0.7785, |
|
"step": 2125 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.2393846506779741, |
|
"learning_rate": 0.00010125373845112034, |
|
"loss": 0.7422, |
|
"step": 2130 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.2262608510161545, |
|
"learning_rate": 0.00010080598710811615, |
|
"loss": 0.6922, |
|
"step": 2135 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.3256276878966541, |
|
"learning_rate": 0.00010035821960486643, |
|
"loss": 0.7713, |
|
"step": 2140 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.2496398007237336, |
|
"learning_rate": 9.9910444919223e-05, |
|
"loss": 0.7058, |
|
"step": 2145 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.29531137204475016, |
|
"learning_rate": 9.946267202918157e-05, |
|
"loss": 0.6994, |
|
"step": 2150 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.2596775947550526, |
|
"learning_rate": 9.901490991270201e-05, |
|
"loss": 0.6993, |
|
"step": 2155 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"grad_norm": 0.5102446180121213, |
|
"learning_rate": 9.856716754752796e-05, |
|
"loss": 0.6543, |
|
"step": 2160 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.2754247927387255, |
|
"learning_rate": 9.811945391100724e-05, |
|
"loss": 0.7366, |
|
"step": 2165 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.23454396466279584, |
|
"learning_rate": 9.767177797991155e-05, |
|
"loss": 0.7441, |
|
"step": 2170 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.22647788058437757, |
|
"learning_rate": 9.722414873025664e-05, |
|
"loss": 0.6849, |
|
"step": 2175 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.2210584391132073, |
|
"learning_rate": 9.677657513712221e-05, |
|
"loss": 0.727, |
|
"step": 2180 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.24884640250686244, |
|
"learning_rate": 9.632906617447212e-05, |
|
"loss": 0.6939, |
|
"step": 2185 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.2189510126025836, |
|
"learning_rate": 9.588163081497427e-05, |
|
"loss": 0.7871, |
|
"step": 2190 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.25847268142498553, |
|
"learning_rate": 9.543427802982095e-05, |
|
"loss": 0.765, |
|
"step": 2195 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"grad_norm": 0.2887583084675781, |
|
"learning_rate": 9.498701678854865e-05, |
|
"loss": 0.6855, |
|
"step": 2200 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.21832636439823108, |
|
"learning_rate": 9.453985605885851e-05, |
|
"loss": 0.732, |
|
"step": 2205 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.25875988987504445, |
|
"learning_rate": 9.409280480643628e-05, |
|
"loss": 0.776, |
|
"step": 2210 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.2937585368281696, |
|
"learning_rate": 9.364587199477276e-05, |
|
"loss": 0.7234, |
|
"step": 2215 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.21327899166987224, |
|
"learning_rate": 9.319906658498389e-05, |
|
"loss": 0.7519, |
|
"step": 2220 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.24464572895636677, |
|
"learning_rate": 9.275239753563128e-05, |
|
"loss": 0.7196, |
|
"step": 2225 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.22386709524202114, |
|
"learning_rate": 9.230587380254237e-05, |
|
"loss": 0.6901, |
|
"step": 2230 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.23753591042587965, |
|
"learning_rate": 9.185950433863107e-05, |
|
"loss": 0.6676, |
|
"step": 2235 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"grad_norm": 0.17549702993152236, |
|
"learning_rate": 9.141329809371803e-05, |
|
"loss": 0.7646, |
|
"step": 2240 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.23044355517418907, |
|
"learning_rate": 9.096726401435146e-05, |
|
"loss": 0.7121, |
|
"step": 2245 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.2037402971541335, |
|
"learning_rate": 9.052141104362748e-05, |
|
"loss": 0.6541, |
|
"step": 2250 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.2173788729813436, |
|
"learning_rate": 9.007574812101107e-05, |
|
"loss": 0.7522, |
|
"step": 2255 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.24289241576799026, |
|
"learning_rate": 8.963028418215653e-05, |
|
"loss": 0.8154, |
|
"step": 2260 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.24110107744789394, |
|
"learning_rate": 8.918502815872865e-05, |
|
"loss": 0.7512, |
|
"step": 2265 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.2505142119875894, |
|
"learning_rate": 8.873998897822336e-05, |
|
"loss": 0.7519, |
|
"step": 2270 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.2603773293471231, |
|
"learning_rate": 8.829517556378888e-05, |
|
"loss": 0.6786, |
|
"step": 2275 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"grad_norm": 0.30552669526483317, |
|
"learning_rate": 8.785059683404672e-05, |
|
"loss": 0.7016, |
|
"step": 2280 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.30878912939747605, |
|
"learning_rate": 8.740626170291297e-05, |
|
"loss": 0.7535, |
|
"step": 2285 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.28441949312000736, |
|
"learning_rate": 8.696217907941941e-05, |
|
"loss": 0.7209, |
|
"step": 2290 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.25705307392892374, |
|
"learning_rate": 8.65183578675351e-05, |
|
"loss": 0.7042, |
|
"step": 2295 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.2575869631501832, |
|
"learning_rate": 8.607480696598762e-05, |
|
"loss": 0.6118, |
|
"step": 2300 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.3327254376840343, |
|
"learning_rate": 8.563153526808484e-05, |
|
"loss": 0.7738, |
|
"step": 2305 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.24348419071615154, |
|
"learning_rate": 8.518855166153644e-05, |
|
"loss": 0.6924, |
|
"step": 2310 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"grad_norm": 0.3469772616735954, |
|
"learning_rate": 8.474586502827591e-05, |
|
"loss": 0.7001, |
|
"step": 2315 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.19770150428519379, |
|
"learning_rate": 8.43034842442822e-05, |
|
"loss": 0.6706, |
|
"step": 2320 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.4391168084334309, |
|
"learning_rate": 8.386141817940213e-05, |
|
"loss": 0.701, |
|
"step": 2325 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.24046204644423808, |
|
"learning_rate": 8.341967569717202e-05, |
|
"loss": 0.6511, |
|
"step": 2330 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.24121857059524954, |
|
"learning_rate": 8.297826565464057e-05, |
|
"loss": 0.637, |
|
"step": 2335 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.289807786292161, |
|
"learning_rate": 8.253719690219079e-05, |
|
"loss": 0.7298, |
|
"step": 2340 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.2675093976169691, |
|
"learning_rate": 8.20964782833628e-05, |
|
"loss": 0.6229, |
|
"step": 2345 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.32873199218989857, |
|
"learning_rate": 8.165611863467644e-05, |
|
"loss": 0.7254, |
|
"step": 2350 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"grad_norm": 0.21463882575401755, |
|
"learning_rate": 8.12161267854542e-05, |
|
"loss": 0.6896, |
|
"step": 2355 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.20392542858991738, |
|
"learning_rate": 8.077651155764387e-05, |
|
"loss": 0.6524, |
|
"step": 2360 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.21818270130807613, |
|
"learning_rate": 8.033728176564216e-05, |
|
"loss": 0.7482, |
|
"step": 2365 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.3236950015797695, |
|
"learning_rate": 7.98984462161175e-05, |
|
"loss": 0.7463, |
|
"step": 2370 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.23726714785703115, |
|
"learning_rate": 7.946001370783375e-05, |
|
"loss": 0.7916, |
|
"step": 2375 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.2087887063125164, |
|
"learning_rate": 7.902199303147363e-05, |
|
"loss": 0.6875, |
|
"step": 2380 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.27106485088624854, |
|
"learning_rate": 7.858439296946263e-05, |
|
"loss": 0.7802, |
|
"step": 2385 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.2851539067095835, |
|
"learning_rate": 7.814722229579264e-05, |
|
"loss": 0.6769, |
|
"step": 2390 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"grad_norm": 0.24800513231309682, |
|
"learning_rate": 7.771048977584641e-05, |
|
"loss": 0.6148, |
|
"step": 2395 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.30123103203277307, |
|
"learning_rate": 7.727420416622144e-05, |
|
"loss": 0.8299, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.28336701564838623, |
|
"learning_rate": 7.683837421455466e-05, |
|
"loss": 0.6799, |
|
"step": 2405 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.24418965636330248, |
|
"learning_rate": 7.640300865934687e-05, |
|
"loss": 0.6924, |
|
"step": 2410 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.23926492236991967, |
|
"learning_rate": 7.596811622978765e-05, |
|
"loss": 0.7157, |
|
"step": 2415 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.2584954114418789, |
|
"learning_rate": 7.553370564558032e-05, |
|
"loss": 0.7652, |
|
"step": 2420 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.29773986167748034, |
|
"learning_rate": 7.509978561676699e-05, |
|
"loss": 0.7581, |
|
"step": 2425 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.2368205960863638, |
|
"learning_rate": 7.46663648435541e-05, |
|
"loss": 0.6941, |
|
"step": 2430 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"grad_norm": 0.2839556861514916, |
|
"learning_rate": 7.423345201613778e-05, |
|
"loss": 0.677, |
|
"step": 2435 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.32721343862085367, |
|
"learning_rate": 7.380105581452987e-05, |
|
"loss": 0.7381, |
|
"step": 2440 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.2620811211111583, |
|
"learning_rate": 7.336918490838356e-05, |
|
"loss": 0.7808, |
|
"step": 2445 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.23444393543960068, |
|
"learning_rate": 7.293784795681994e-05, |
|
"loss": 0.7252, |
|
"step": 2450 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.2457300140925708, |
|
"learning_rate": 7.250705360825392e-05, |
|
"loss": 0.6721, |
|
"step": 2455 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.27505166245514767, |
|
"learning_rate": 7.207681050022132e-05, |
|
"loss": 0.7478, |
|
"step": 2460 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.2910558326018566, |
|
"learning_rate": 7.16471272592053e-05, |
|
"loss": 0.7808, |
|
"step": 2465 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.24862527602067405, |
|
"learning_rate": 7.121801250046363e-05, |
|
"loss": 0.7691, |
|
"step": 2470 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"grad_norm": 0.1902191441389871, |
|
"learning_rate": 7.078947482785576e-05, |
|
"loss": 0.7097, |
|
"step": 2475 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.23472131108538954, |
|
"learning_rate": 7.036152283367056e-05, |
|
"loss": 0.6912, |
|
"step": 2480 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.2189915658804591, |
|
"learning_rate": 6.993416509845376e-05, |
|
"loss": 0.7211, |
|
"step": 2485 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.2603896298420689, |
|
"learning_rate": 6.950741019083617e-05, |
|
"loss": 0.7188, |
|
"step": 2490 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.2559070868163805, |
|
"learning_rate": 6.908126666736165e-05, |
|
"loss": 0.6925, |
|
"step": 2495 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.29850542028796345, |
|
"learning_rate": 6.865574307231575e-05, |
|
"loss": 0.7119, |
|
"step": 2500 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.19364511158581588, |
|
"learning_rate": 6.823084793755417e-05, |
|
"loss": 0.685, |
|
"step": 2505 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 0.3367238354963921, |
|
"learning_rate": 6.780658978233199e-05, |
|
"loss": 0.7398, |
|
"step": 2510 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.19047235797749595, |
|
"learning_rate": 6.73829771131325e-05, |
|
"loss": 0.7311, |
|
"step": 2515 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.3005689166877733, |
|
"learning_rate": 6.696001842349702e-05, |
|
"loss": 0.6747, |
|
"step": 2520 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.31211848702866324, |
|
"learning_rate": 6.653772219385427e-05, |
|
"loss": 0.759, |
|
"step": 2525 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.25625855783899115, |
|
"learning_rate": 6.611609689135056e-05, |
|
"loss": 0.7853, |
|
"step": 2530 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.23106616213334621, |
|
"learning_rate": 6.569515096967991e-05, |
|
"loss": 0.6437, |
|
"step": 2535 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.18916281943913288, |
|
"learning_rate": 6.527489286891459e-05, |
|
"loss": 0.5797, |
|
"step": 2540 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.21350268536847394, |
|
"learning_rate": 6.485533101533583e-05, |
|
"loss": 0.7013, |
|
"step": 2545 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"grad_norm": 0.28763484217532065, |
|
"learning_rate": 6.443647382126509e-05, |
|
"loss": 0.7229, |
|
"step": 2550 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.2488901500729856, |
|
"learning_rate": 6.401832968489501e-05, |
|
"loss": 0.686, |
|
"step": 2555 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.20556797301856877, |
|
"learning_rate": 6.360090699012145e-05, |
|
"loss": 0.7435, |
|
"step": 2560 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.25971455100175256, |
|
"learning_rate": 6.3184214106375e-05, |
|
"loss": 0.6772, |
|
"step": 2565 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.2438737947426562, |
|
"learning_rate": 6.27682593884535e-05, |
|
"loss": 0.6351, |
|
"step": 2570 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.2239080056853126, |
|
"learning_rate": 6.235305117635428e-05, |
|
"loss": 0.7386, |
|
"step": 2575 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.2284401860369392, |
|
"learning_rate": 6.193859779510712e-05, |
|
"loss": 0.6939, |
|
"step": 2580 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.24456271598752877, |
|
"learning_rate": 6.152490755460715e-05, |
|
"loss": 0.6706, |
|
"step": 2585 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"grad_norm": 0.22235388160369995, |
|
"learning_rate": 6.111198874944845e-05, |
|
"loss": 0.6903, |
|
"step": 2590 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.2144202608438619, |
|
"learning_rate": 6.0699849658757545e-05, |
|
"loss": 0.7528, |
|
"step": 2595 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.19956268282116124, |
|
"learning_rate": 6.0288498546027536e-05, |
|
"loss": 0.775, |
|
"step": 2600 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.22191365952846825, |
|
"learning_rate": 5.987794365895229e-05, |
|
"loss": 0.6759, |
|
"step": 2605 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.31000384496295275, |
|
"learning_rate": 5.946819322926127e-05, |
|
"loss": 0.6828, |
|
"step": 2610 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.27838526207253456, |
|
"learning_rate": 5.9059255472554195e-05, |
|
"loss": 0.6906, |
|
"step": 2615 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.2320736604730188, |
|
"learning_rate": 5.865113858813673e-05, |
|
"loss": 0.6665, |
|
"step": 2620 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.282228906687919, |
|
"learning_rate": 5.824385075885557e-05, |
|
"loss": 0.6861, |
|
"step": 2625 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"grad_norm": 0.21066270814730526, |
|
"learning_rate": 5.783740015093484e-05, |
|
"loss": 0.7175, |
|
"step": 2630 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.2665172474983474, |
|
"learning_rate": 5.743179491381204e-05, |
|
"loss": 0.706, |
|
"step": 2635 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.2338869396395331, |
|
"learning_rate": 5.702704317997492e-05, |
|
"loss": 0.6729, |
|
"step": 2640 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.21891639712761243, |
|
"learning_rate": 5.662315306479806e-05, |
|
"loss": 0.7053, |
|
"step": 2645 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.22963997129503227, |
|
"learning_rate": 5.6220132666380635e-05, |
|
"loss": 0.624, |
|
"step": 2650 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.28440467945947445, |
|
"learning_rate": 5.581799006538354e-05, |
|
"loss": 0.7612, |
|
"step": 2655 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.29609689514958054, |
|
"learning_rate": 5.541673332486773e-05, |
|
"loss": 0.7444, |
|
"step": 2660 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.2650460281999568, |
|
"learning_rate": 5.5016370490132364e-05, |
|
"loss": 0.8484, |
|
"step": 2665 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"grad_norm": 0.28809355156027594, |
|
"learning_rate": 5.4616909588553674e-05, |
|
"loss": 0.7871, |
|
"step": 2670 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.24812243233821338, |
|
"learning_rate": 5.4218358629423794e-05, |
|
"loss": 0.7977, |
|
"step": 2675 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.2920348572251461, |
|
"learning_rate": 5.3820725603790346e-05, |
|
"loss": 0.7394, |
|
"step": 2680 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.29200632111876645, |
|
"learning_rate": 5.342401848429615e-05, |
|
"loss": 0.778, |
|
"step": 2685 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.2432759115984862, |
|
"learning_rate": 5.30282452250193e-05, |
|
"loss": 0.7885, |
|
"step": 2690 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.2838656924088362, |
|
"learning_rate": 5.263341376131382e-05, |
|
"loss": 0.7023, |
|
"step": 2695 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.28056038476141837, |
|
"learning_rate": 5.223953200965055e-05, |
|
"loss": 0.7367, |
|
"step": 2700 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"grad_norm": 0.22746358276548237, |
|
"learning_rate": 5.1846607867458196e-05, |
|
"loss": 0.8169, |
|
"step": 2705 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.2754771874933674, |
|
"learning_rate": 5.145464921296537e-05, |
|
"loss": 0.7762, |
|
"step": 2710 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.275520392192173, |
|
"learning_rate": 5.1063663905042225e-05, |
|
"loss": 0.6907, |
|
"step": 2715 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.2742206750054377, |
|
"learning_rate": 5.067365978304315e-05, |
|
"loss": 0.6962, |
|
"step": 2720 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.3043303805027072, |
|
"learning_rate": 5.0284644666649485e-05, |
|
"loss": 0.6863, |
|
"step": 2725 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.26287170160778367, |
|
"learning_rate": 4.9896626355712805e-05, |
|
"loss": 0.6837, |
|
"step": 2730 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.23968527436607373, |
|
"learning_rate": 4.95096126300984e-05, |
|
"loss": 0.6932, |
|
"step": 2735 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.19323389498485988, |
|
"learning_rate": 4.912361124952948e-05, |
|
"loss": 0.7342, |
|
"step": 2740 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"grad_norm": 0.2216783500769926, |
|
"learning_rate": 4.873862995343139e-05, |
|
"loss": 0.6647, |
|
"step": 2745 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.26023716724912155, |
|
"learning_rate": 4.835467646077656e-05, |
|
"loss": 0.7357, |
|
"step": 2750 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.25899624407357125, |
|
"learning_rate": 4.797175846992964e-05, |
|
"loss": 0.6544, |
|
"step": 2755 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.18644116956534368, |
|
"learning_rate": 4.7589883658493296e-05, |
|
"loss": 0.7359, |
|
"step": 2760 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.24394521839077538, |
|
"learning_rate": 4.72090596831542e-05, |
|
"loss": 0.7304, |
|
"step": 2765 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.3406491454673553, |
|
"learning_rate": 4.682929417952939e-05, |
|
"loss": 0.6508, |
|
"step": 2770 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.2508477121094201, |
|
"learning_rate": 4.645059476201333e-05, |
|
"loss": 0.6859, |
|
"step": 2775 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.20268926784051644, |
|
"learning_rate": 4.6072969023625165e-05, |
|
"loss": 0.7103, |
|
"step": 2780 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.24062467799728976, |
|
"learning_rate": 4.5696424535856574e-05, |
|
"loss": 0.705, |
|
"step": 2785 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.26693143779686646, |
|
"learning_rate": 4.532096884851978e-05, |
|
"loss": 0.7319, |
|
"step": 2790 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.23409445770349235, |
|
"learning_rate": 4.494660948959645e-05, |
|
"loss": 0.791, |
|
"step": 2795 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.2845933689629394, |
|
"learning_rate": 4.457335396508631e-05, |
|
"loss": 0.6062, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.24119287628680353, |
|
"learning_rate": 4.420120975885723e-05, |
|
"loss": 0.7077, |
|
"step": 2805 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.21991354406483815, |
|
"learning_rate": 4.383018433249464e-05, |
|
"loss": 0.6805, |
|
"step": 2810 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.2475445630279962, |
|
"learning_rate": 4.346028512515232e-05, |
|
"loss": 0.593, |
|
"step": 2815 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.24157860899387687, |
|
"learning_rate": 4.309151955340297e-05, |
|
"loss": 0.7195, |
|
"step": 2820 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.2123078817060314, |
|
"learning_rate": 4.272389501108962e-05, |
|
"loss": 0.7566, |
|
"step": 2825 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.33254744474875086, |
|
"learning_rate": 4.2357418869177354e-05, |
|
"loss": 0.7517, |
|
"step": 2830 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.2450146085609281, |
|
"learning_rate": 4.1992098475605654e-05, |
|
"loss": 0.7792, |
|
"step": 2835 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.2704781574806764, |
|
"learning_rate": 4.162794115514078e-05, |
|
"loss": 0.7238, |
|
"step": 2840 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.37954117128340364, |
|
"learning_rate": 4.1264954209229254e-05, |
|
"loss": 0.6556, |
|
"step": 2845 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.2860982586980259, |
|
"learning_rate": 4.0903144915851174e-05, |
|
"loss": 0.6506, |
|
"step": 2850 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.20667223525821882, |
|
"learning_rate": 4.054252052937444e-05, |
|
"loss": 0.6182, |
|
"step": 2855 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.2634554788563136, |
|
"learning_rate": 4.018308828040924e-05, |
|
"loss": 0.727, |
|
"step": 2860 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.2786956847780988, |
|
"learning_rate": 3.982485537566321e-05, |
|
"loss": 0.7262, |
|
"step": 2865 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.18437896768984685, |
|
"learning_rate": 3.946782899779667e-05, |
|
"loss": 0.6169, |
|
"step": 2870 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2622931367541885, |
|
"learning_rate": 3.911201630527894e-05, |
|
"loss": 0.7385, |
|
"step": 2875 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2748261196950285, |
|
"learning_rate": 3.875742443224451e-05, |
|
"loss": 0.6136, |
|
"step": 2880 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.28744844039364625, |
|
"learning_rate": 3.84040604883502e-05, |
|
"loss": 0.7284, |
|
"step": 2885 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.21857313698448572, |
|
"learning_rate": 3.805193155863247e-05, |
|
"loss": 0.6383, |
|
"step": 2890 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2128180862906637, |
|
"learning_rate": 3.770104470336555e-05, |
|
"loss": 0.7173, |
|
"step": 2895 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2130466930744276, |
|
"learning_rate": 3.7351406957919636e-05, |
|
"loss": 0.7266, |
|
"step": 2900 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.30988412023187845, |
|
"learning_rate": 3.700302533262007e-05, |
|
"loss": 0.717, |
|
"step": 2905 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.29577601576252954, |
|
"learning_rate": 3.665590681260658e-05, |
|
"loss": 0.6991, |
|
"step": 2910 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.29759259924638304, |
|
"learning_rate": 3.631005835769334e-05, |
|
"loss": 0.8029, |
|
"step": 2915 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.22770941704354283, |
|
"learning_rate": 3.59654869022294e-05, |
|
"loss": 0.7255, |
|
"step": 2920 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.2245089532543722, |
|
"learning_rate": 3.5622199354959677e-05, |
|
"loss": 0.8236, |
|
"step": 2925 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.209100399963671, |
|
"learning_rate": 3.5280202598886324e-05, |
|
"loss": 0.6472, |
|
"step": 2930 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.22771150718592803, |
|
"learning_rate": 3.4939503491130934e-05, |
|
"loss": 0.6684, |
|
"step": 2935 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.2617878485718964, |
|
"learning_rate": 3.4600108862796796e-05, |
|
"loss": 0.7271, |
|
"step": 2940 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2674851467564555, |
|
"learning_rate": 3.426202551883213e-05, |
|
"loss": 0.7587, |
|
"step": 2945 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2200031558966552, |
|
"learning_rate": 3.392526023789349e-05, |
|
"loss": 0.7258, |
|
"step": 2950 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.21417253713351433, |
|
"learning_rate": 3.358981977221006e-05, |
|
"loss": 0.6981, |
|
"step": 2955 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2217852576193653, |
|
"learning_rate": 3.325571084744803e-05, |
|
"loss": 0.6972, |
|
"step": 2960 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2984053822842527, |
|
"learning_rate": 3.292294016257598e-05, |
|
"loss": 0.6919, |
|
"step": 2965 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.25032718638928686, |
|
"learning_rate": 3.259151438973024e-05, |
|
"loss": 0.7997, |
|
"step": 2970 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.26398149994753983, |
|
"learning_rate": 3.2261440174081525e-05, |
|
"loss": 0.7703, |
|
"step": 2975 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2088495888581316, |
|
"learning_rate": 3.1932724133701344e-05, |
|
"loss": 0.5416, |
|
"step": 2980 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.2886491177936703, |
|
"learning_rate": 3.160537285942956e-05, |
|
"loss": 0.6853, |
|
"step": 2985 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.2594733197020413, |
|
"learning_rate": 3.1279392914742046e-05, |
|
"loss": 0.7261, |
|
"step": 2990 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.3017152993780796, |
|
"learning_rate": 3.0954790835619195e-05, |
|
"loss": 0.6988, |
|
"step": 2995 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.2613281888272547, |
|
"learning_rate": 3.06315731304148e-05, |
|
"loss": 0.7953, |
|
"step": 3000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.3030649141190901, |
|
"learning_rate": 3.0309746279725748e-05, |
|
"loss": 0.6728, |
|
"step": 3005 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.2049675658528854, |
|
"learning_rate": 2.998931673626175e-05, |
|
"loss": 0.6939, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.23405886262502706, |
|
"learning_rate": 2.9670290924716337e-05, |
|
"loss": 0.7633, |
|
"step": 3015 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.16410838860875954, |
|
"learning_rate": 2.935267524163774e-05, |
|
"loss": 0.7258, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.24028216791671228, |
|
"learning_rate": 2.9036476055300765e-05, |
|
"loss": 0.759, |
|
"step": 3025 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.2512891253559569, |
|
"learning_rate": 2.872169970557913e-05, |
|
"loss": 0.7622, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.24326580414626328, |
|
"learning_rate": 2.8408352503818357e-05, |
|
"loss": 0.7471, |
|
"step": 3035 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.30684996416868315, |
|
"learning_rate": 2.8096440732709083e-05, |
|
"loss": 0.7234, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.2610371672143647, |
|
"learning_rate": 2.778597064616133e-05, |
|
"loss": 0.7114, |
|
"step": 3045 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.28649543075043066, |
|
"learning_rate": 2.7476948469178887e-05, |
|
"loss": 0.6914, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.24940516209657004, |
|
"learning_rate": 2.716938039773459e-05, |
|
"loss": 0.754, |
|
"step": 3055 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.31813491613101524, |
|
"learning_rate": 2.6863272598646106e-05, |
|
"loss": 0.6058, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.21911314508163085, |
|
"learning_rate": 2.6558631209452323e-05, |
|
"loss": 0.7083, |
|
"step": 3065 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.24723577107070233, |
|
"learning_rate": 2.625546233829016e-05, |
|
"loss": 0.6785, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.26215344220728737, |
|
"learning_rate": 2.5953772063772252e-05, |
|
"loss": 0.7434, |
|
"step": 3075 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.2182580130319897, |
|
"learning_rate": 2.5653566434864928e-05, |
|
"loss": 0.7029, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.2589107810453318, |
|
"learning_rate": 2.5354851470767037e-05, |
|
"loss": 0.702, |
|
"step": 3085 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.25236658924095434, |
|
"learning_rate": 2.5057633160789184e-05, |
|
"loss": 0.7096, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.2751653091880903, |
|
"learning_rate": 2.476191746423373e-05, |
|
"loss": 0.7791, |
|
"step": 3095 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.27791939731022475, |
|
"learning_rate": 2.446771031027527e-05, |
|
"loss": 0.7008, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.2337931562400226, |
|
"learning_rate": 2.4175017597841666e-05, |
|
"loss": 0.7951, |
|
"step": 3105 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.23060409799544296, |
|
"learning_rate": 2.3883845195495878e-05, |
|
"loss": 0.7475, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.19515481997034956, |
|
"learning_rate": 2.3594198941318248e-05, |
|
"loss": 0.677, |
|
"step": 3115 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.21013466271107667, |
|
"learning_rate": 2.330608464278953e-05, |
|
"loss": 0.7343, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.2655018550539882, |
|
"learning_rate": 2.3019508076674267e-05, |
|
"loss": 0.6745, |
|
"step": 3125 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.22554453658559034, |
|
"learning_rate": 2.273447498890521e-05, |
|
"loss": 0.8374, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.290174375117781, |
|
"learning_rate": 2.2450991094467865e-05, |
|
"loss": 0.668, |
|
"step": 3135 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.22779449704015578, |
|
"learning_rate": 2.2169062077286075e-05, |
|
"loss": 0.6899, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.25494436011601035, |
|
"learning_rate": 2.1888693590107946e-05, |
|
"loss": 0.7013, |
|
"step": 3145 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.14266544623444502, |
|
"learning_rate": 2.1609891254392678e-05, |
|
"loss": 0.6311, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.23298437086921395, |
|
"learning_rate": 2.1332660660197602e-05, |
|
"loss": 0.6729, |
|
"step": 3155 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.30998975291365666, |
|
"learning_rate": 2.1057007366066373e-05, |
|
"loss": 0.777, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.24570574953745566, |
|
"learning_rate": 2.0782936898917204e-05, |
|
"loss": 0.6747, |
|
"step": 3165 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.20435326171988946, |
|
"learning_rate": 2.0510454753932395e-05, |
|
"loss": 0.6928, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.24135761451835352, |
|
"learning_rate": 2.0239566394447874e-05, |
|
"loss": 0.7221, |
|
"step": 3175 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.21829756764053368, |
|
"learning_rate": 1.9970277251843862e-05, |
|
"loss": 0.7179, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.19424951966772022, |
|
"learning_rate": 1.9702592725435786e-05, |
|
"loss": 0.7273, |
|
"step": 3185 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.234887739233565, |
|
"learning_rate": 1.9436518182366158e-05, |
|
"loss": 0.7391, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.26397014493832816, |
|
"learning_rate": 1.9172058957496876e-05, |
|
"loss": 0.7456, |
|
"step": 3195 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.19484126439669286, |
|
"learning_rate": 1.8909220353302392e-05, |
|
"loss": 0.6979, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.3263727199356017, |
|
"learning_rate": 1.8648007639763176e-05, |
|
"loss": 0.7104, |
|
"step": 3205 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.2490196028039222, |
|
"learning_rate": 1.838842605426031e-05, |
|
"loss": 0.6245, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.22590753983736134, |
|
"learning_rate": 1.813048080147025e-05, |
|
"loss": 0.6477, |
|
"step": 3215 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.1934318577669605, |
|
"learning_rate": 1.7874177053260598e-05, |
|
"loss": 0.6484, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.19824954761719257, |
|
"learning_rate": 1.7619519948586348e-05, |
|
"loss": 0.6, |
|
"step": 3225 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.29596182070597254, |
|
"learning_rate": 1.736651459338695e-05, |
|
"loss": 0.6995, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.2552348994159813, |
|
"learning_rate": 1.711516606048377e-05, |
|
"loss": 0.7123, |
|
"step": 3235 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.23821982266920058, |
|
"learning_rate": 1.6865479389478545e-05, |
|
"loss": 0.7231, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.20223828918906173, |
|
"learning_rate": 1.6617459586652196e-05, |
|
"loss": 0.673, |
|
"step": 3245 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.21541619069004347, |
|
"learning_rate": 1.6371111624864543e-05, |
|
"loss": 0.6788, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.23132279393395067, |
|
"learning_rate": 1.612644044345456e-05, |
|
"loss": 0.7001, |
|
"step": 3255 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.3104102493541718, |
|
"learning_rate": 1.5883450948141377e-05, |
|
"loss": 0.6592, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.19498096199332454, |
|
"learning_rate": 1.564214801092585e-05, |
|
"loss": 0.764, |
|
"step": 3265 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.22350807889337018, |
|
"learning_rate": 1.540253646999299e-05, |
|
"loss": 0.7285, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.2530056423195899, |
|
"learning_rate": 1.5164621129614787e-05, |
|
"loss": 0.6813, |
|
"step": 3275 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.24407289762340453, |
|
"learning_rate": 1.4928406760054059e-05, |
|
"loss": 0.702, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.27667414480460156, |
|
"learning_rate": 1.4693898097468662e-05, |
|
"loss": 0.7285, |
|
"step": 3285 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.24681628743295808, |
|
"learning_rate": 1.4461099843816684e-05, |
|
"loss": 0.6954, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.24873679256421594, |
|
"learning_rate": 1.4230016666761981e-05, |
|
"loss": 0.7655, |
|
"step": 3295 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.22862446264265862, |
|
"learning_rate": 1.4000653199580782e-05, |
|
"loss": 0.7687, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.3046659102738475, |
|
"learning_rate": 1.3773014041068633e-05, |
|
"loss": 0.7156, |
|
"step": 3305 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.22407782938978063, |
|
"learning_rate": 1.3547103755448287e-05, |
|
"loss": 0.7858, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.19036374903254158, |
|
"learning_rate": 1.3322926872278097e-05, |
|
"loss": 0.7766, |
|
"step": 3315 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.26699406565117034, |
|
"learning_rate": 1.3100487886361379e-05, |
|
"loss": 0.7457, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.22344849327126515, |
|
"learning_rate": 1.2879791257656049e-05, |
|
"loss": 0.7227, |
|
"step": 3325 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.1919000639308728, |
|
"learning_rate": 1.266084141118542e-05, |
|
"loss": 0.683, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.2126312569959486, |
|
"learning_rate": 1.2443642736949258e-05, |
|
"loss": 0.702, |
|
"step": 3335 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.21328716751976873, |
|
"learning_rate": 1.2228199589835999e-05, |
|
"loss": 0.7592, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.25817417734261355, |
|
"learning_rate": 1.2014516289535249e-05, |
|
"loss": 0.7101, |
|
"step": 3345 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.24654221507857982, |
|
"learning_rate": 1.1802597120451286e-05, |
|
"loss": 0.7651, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.2729024564998873, |
|
"learning_rate": 1.1592446331617068e-05, |
|
"loss": 0.6934, |
|
"step": 3355 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.27603586653087275, |
|
"learning_rate": 1.1384068136609105e-05, |
|
"loss": 0.7848, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.3079236794395131, |
|
"learning_rate": 1.1177466713462926e-05, |
|
"loss": 0.6581, |
|
"step": 3365 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.31502255340495816, |
|
"learning_rate": 1.0972646204589377e-05, |
|
"loss": 0.6583, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.22084726894868312, |
|
"learning_rate": 1.076961071669148e-05, |
|
"loss": 0.7294, |
|
"step": 3375 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.2472008055830039, |
|
"learning_rate": 1.0568364320682178e-05, |
|
"loss": 0.7834, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.2277343730480719, |
|
"learning_rate": 1.0368911051602636e-05, |
|
"loss": 0.66, |
|
"step": 3385 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.2415285274184652, |
|
"learning_rate": 1.0171254908541372e-05, |
|
"loss": 0.6748, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.2861895095719399, |
|
"learning_rate": 9.975399854554068e-06, |
|
"loss": 0.6721, |
|
"step": 3395 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.25395148971387527, |
|
"learning_rate": 9.781349816584162e-06, |
|
"loss": 0.7267, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.21070182161070172, |
|
"learning_rate": 9.589108685383975e-06, |
|
"loss": 0.7106, |
|
"step": 3405 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.25593439329555395, |
|
"learning_rate": 9.398680315436903e-06, |
|
"loss": 0.6957, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.21752295344145742, |
|
"learning_rate": 9.210068524879923e-06, |
|
"loss": 0.685, |
|
"step": 3415 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.2446368196142467, |
|
"learning_rate": 9.023277095427173e-06, |
|
"loss": 0.7026, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.23687207505265248, |
|
"learning_rate": 8.838309772294085e-06, |
|
"loss": 0.744, |
|
"step": 3425 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.25129247742502786, |
|
"learning_rate": 8.655170264122303e-06, |
|
"loss": 0.731, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.22617739846256776, |
|
"learning_rate": 8.473862242905339e-06, |
|
"loss": 0.7936, |
|
"step": 3435 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.19703347689106585, |
|
"learning_rate": 8.294389343914899e-06, |
|
"loss": 0.6508, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.2290192280255233, |
|
"learning_rate": 8.11675516562802e-06, |
|
"loss": 0.7213, |
|
"step": 3445 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.22631388422212603, |
|
"learning_rate": 7.940963269654922e-06, |
|
"loss": 0.8072, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.23898504344416246, |
|
"learning_rate": 7.767017180667645e-06, |
|
"loss": 0.7186, |
|
"step": 3455 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.1771416341491615, |
|
"learning_rate": 7.594920386329252e-06, |
|
"loss": 0.6843, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.32232301037200284, |
|
"learning_rate": 7.4246763372240795e-06, |
|
"loss": 0.6092, |
|
"step": 3465 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.20983698372327894, |
|
"learning_rate": 7.256288446788362e-06, |
|
"loss": 0.6873, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.23642454930535492, |
|
"learning_rate": 7.089760091241937e-06, |
|
"loss": 0.7668, |
|
"step": 3475 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.19342866826333685, |
|
"learning_rate": 6.925094609520455e-06, |
|
"loss": 0.6985, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.1659312022620069, |
|
"learning_rate": 6.762295303208532e-06, |
|
"loss": 0.6082, |
|
"step": 3485 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.23314155105131054, |
|
"learning_rate": 6.601365436473439e-06, |
|
"loss": 0.7625, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.2266096665634237, |
|
"learning_rate": 6.44230823599975e-06, |
|
"loss": 0.7175, |
|
"step": 3495 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.285619796115372, |
|
"learning_rate": 6.2851268909245865e-06, |
|
"loss": 0.6727, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.2499473433608083, |
|
"learning_rate": 6.1298245527737e-06, |
|
"loss": 0.7572, |
|
"step": 3505 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.23560223011085285, |
|
"learning_rate": 5.976404335398256e-06, |
|
"loss": 0.6292, |
|
"step": 3510 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.33344906781754363, |
|
"learning_rate": 5.824869314912473e-06, |
|
"loss": 0.7019, |
|
"step": 3515 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.19032010411212455, |
|
"learning_rate": 5.675222529631841e-06, |
|
"loss": 0.7159, |
|
"step": 3520 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.33161459892073875, |
|
"learning_rate": 5.527466980012297e-06, |
|
"loss": 0.7218, |
|
"step": 3525 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.2327886817133746, |
|
"learning_rate": 5.381605628590003e-06, |
|
"loss": 0.7328, |
|
"step": 3530 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.22800725916467116, |
|
"learning_rate": 5.237641399921955e-06, |
|
"loss": 0.6795, |
|
"step": 3535 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.23265993183122352, |
|
"learning_rate": 5.095577180527378e-06, |
|
"loss": 0.752, |
|
"step": 3540 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.21740715842870284, |
|
"learning_rate": 4.9554158188298445e-06, |
|
"loss": 0.7713, |
|
"step": 3545 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.2575708746269016, |
|
"learning_rate": 4.817160125100106e-06, |
|
"loss": 0.7631, |
|
"step": 3550 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.26658430974820374, |
|
"learning_rate": 4.680812871399854e-06, |
|
"loss": 0.7095, |
|
"step": 3555 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.22923585645582018, |
|
"learning_rate": 4.546376791525975e-06, |
|
"loss": 0.7197, |
|
"step": 3560 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.20839075381158542, |
|
"learning_rate": 4.413854580955945e-06, |
|
"loss": 0.717, |
|
"step": 3565 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.2126973684335578, |
|
"learning_rate": 4.2832488967935795e-06, |
|
"loss": 0.7439, |
|
"step": 3570 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.22046801778519598, |
|
"learning_rate": 4.154562357715952e-06, |
|
"loss": 0.7287, |
|
"step": 3575 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.22198088834095264, |
|
"learning_rate": 4.02779754392072e-06, |
|
"loss": 0.6862, |
|
"step": 3580 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.2562953238617015, |
|
"learning_rate": 3.90295699707447e-06, |
|
"loss": 0.705, |
|
"step": 3585 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.2832586699922064, |
|
"learning_rate": 3.780043220261764e-06, |
|
"loss": 0.7525, |
|
"step": 3590 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.23818590199709755, |
|
"learning_rate": 3.659058677934957e-06, |
|
"loss": 0.7018, |
|
"step": 3595 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.3095559851055113, |
|
"learning_rate": 3.540005795864709e-06, |
|
"loss": 0.6073, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.1861043245337588, |
|
"learning_rate": 3.422886961091476e-06, |
|
"loss": 0.6862, |
|
"step": 3605 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.2807414585458434, |
|
"learning_rate": 3.3077045218775192e-06, |
|
"loss": 0.7819, |
|
"step": 3610 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.23756038931466225, |
|
"learning_rate": 3.194460787659892e-06, |
|
"loss": 0.7211, |
|
"step": 3615 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.2375925655429517, |
|
"learning_rate": 3.0831580290041184e-06, |
|
"loss": 0.7611, |
|
"step": 3620 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.35878418981357185, |
|
"learning_rate": 2.973798477558709e-06, |
|
"loss": 0.7034, |
|
"step": 3625 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.18014791198441923, |
|
"learning_rate": 2.8663843260103074e-06, |
|
"loss": 0.6713, |
|
"step": 3630 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.2066769742143976, |
|
"learning_rate": 2.76091772803988e-06, |
|
"loss": 0.7165, |
|
"step": 3635 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.29548750089547526, |
|
"learning_rate": 2.6574007982793857e-06, |
|
"loss": 0.7431, |
|
"step": 3640 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.276827175378835, |
|
"learning_rate": 2.555835612269475e-06, |
|
"loss": 0.665, |
|
"step": 3645 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.21719297673422058, |
|
"learning_rate": 2.456224206417812e-06, |
|
"loss": 0.697, |
|
"step": 3650 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.24117198209251042, |
|
"learning_rate": 2.35856857795832e-06, |
|
"loss": 0.7668, |
|
"step": 3655 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.4419996258477192, |
|
"learning_rate": 2.262870684911045e-06, |
|
"loss": 0.6807, |
|
"step": 3660 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.2922290003913108, |
|
"learning_rate": 2.169132446042976e-06, |
|
"loss": 0.6772, |
|
"step": 3665 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.21578507137937, |
|
"learning_rate": 2.0773557408295343e-06, |
|
"loss": 0.7143, |
|
"step": 3670 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.22699656079084274, |
|
"learning_rate": 1.9875424094168647e-06, |
|
"loss": 0.6936, |
|
"step": 3675 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.24001514084329242, |
|
"learning_rate": 1.8996942525850047e-06, |
|
"loss": 0.6977, |
|
"step": 3680 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.26514502359960324, |
|
"learning_rate": 1.813813031711742e-06, |
|
"loss": 0.701, |
|
"step": 3685 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.23244532178437396, |
|
"learning_rate": 1.7299004687372665e-06, |
|
"loss": 0.6792, |
|
"step": 3690 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.2517442477838529, |
|
"learning_rate": 1.6479582461297217e-06, |
|
"loss": 0.6781, |
|
"step": 3695 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.22695769814956426, |
|
"learning_rate": 1.5679880068514174e-06, |
|
"loss": 0.6858, |
|
"step": 3700 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.21710844280108782, |
|
"learning_rate": 1.4899913543258814e-06, |
|
"loss": 0.7148, |
|
"step": 3705 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.2967274268521252, |
|
"learning_rate": 1.4139698524057165e-06, |
|
"loss": 0.781, |
|
"step": 3710 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.30352069911343654, |
|
"learning_rate": 1.3399250253413154e-06, |
|
"loss": 0.7702, |
|
"step": 3715 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.2929106725336177, |
|
"learning_rate": 1.2678583577501624e-06, |
|
"loss": 0.6759, |
|
"step": 3720 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.29851324638490223, |
|
"learning_rate": 1.1977712945872243e-06, |
|
"loss": 0.7205, |
|
"step": 3725 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.26915452333889833, |
|
"learning_rate": 1.1296652411158182e-06, |
|
"loss": 0.7057, |
|
"step": 3730 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.2245310916127946, |
|
"learning_rate": 1.0635415628795665e-06, |
|
"loss": 0.6926, |
|
"step": 3735 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.2178134240444238, |
|
"learning_rate": 9.994015856749527e-07, |
|
"loss": 0.7104, |
|
"step": 3740 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.18742301371018372, |
|
"learning_rate": 9.372465955247544e-07, |
|
"loss": 0.6046, |
|
"step": 3745 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.2282365317937403, |
|
"learning_rate": 8.770778386522627e-07, |
|
"loss": 0.7643, |
|
"step": 3750 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.23411248148329084, |
|
"learning_rate": 8.188965214562804e-07, |
|
"loss": 0.7397, |
|
"step": 3755 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.25053318378159295, |
|
"learning_rate": 7.627038104869199e-07, |
|
"loss": 0.6724, |
|
"step": 3760 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.27403052281802975, |
|
"learning_rate": 7.085008324222763e-07, |
|
"loss": 0.6412, |
|
"step": 3765 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.2635701878097459, |
|
"learning_rate": 6.562886740457797e-07, |
|
"loss": 0.7377, |
|
"step": 3770 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.21017227480655776, |
|
"learning_rate": 6.060683822244117e-07, |
|
"loss": 0.6977, |
|
"step": 3775 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.3453902794038148, |
|
"learning_rate": 5.578409638877457e-07, |
|
"loss": 0.5952, |
|
"step": 3780 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.24555784325804006, |
|
"learning_rate": 5.116073860077064e-07, |
|
"loss": 0.6711, |
|
"step": 3785 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.29390073833065394, |
|
"learning_rate": 4.6736857557925227e-07, |
|
"loss": 0.691, |
|
"step": 3790 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.2782701779800116, |
|
"learning_rate": 4.2512541960171294e-07, |
|
"loss": 0.7768, |
|
"step": 3795 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.37197636711074117, |
|
"learning_rate": 3.8487876506106966e-07, |
|
"loss": 0.7539, |
|
"step": 3800 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.23773581142305195, |
|
"learning_rate": 3.466294189129249e-07, |
|
"loss": 0.6583, |
|
"step": 3805 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.2623487880042545, |
|
"learning_rate": 3.1037814806634815e-07, |
|
"loss": 0.7085, |
|
"step": 3810 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.21088747078936257, |
|
"learning_rate": 2.7612567936849964e-07, |
|
"loss": 0.6627, |
|
"step": 3815 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.19741270913916117, |
|
"learning_rate": 2.43872699590042e-07, |
|
"loss": 0.7, |
|
"step": 3820 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.2518577876286286, |
|
"learning_rate": 2.136198554113844e-07, |
|
"loss": 0.7888, |
|
"step": 3825 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.25499260076898056, |
|
"learning_rate": 1.8536775340970425e-07, |
|
"loss": 0.7371, |
|
"step": 3830 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.22301016570777385, |
|
"learning_rate": 1.591169600468123e-07, |
|
"loss": 0.7354, |
|
"step": 3835 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.24054514645205372, |
|
"learning_rate": 1.348680016577397e-07, |
|
"loss": 0.7868, |
|
"step": 3840 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.24969353601595792, |
|
"learning_rate": 1.126213644402463e-07, |
|
"loss": 0.7333, |
|
"step": 3845 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.22761401425681646, |
|
"learning_rate": 9.237749444505062e-08, |
|
"loss": 0.6615, |
|
"step": 3850 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.24426703862468954, |
|
"learning_rate": 7.413679756684832e-08, |
|
"loss": 0.7324, |
|
"step": 3855 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.2867096795033602, |
|
"learning_rate": 5.7899639536251883e-08, |
|
"loss": 0.7088, |
|
"step": 3860 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.26451528734107205, |
|
"learning_rate": 4.366634591237428e-08, |
|
"loss": 0.7354, |
|
"step": 3865 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.2064960678251034, |
|
"learning_rate": 3.143720207635648e-08, |
|
"loss": 0.674, |
|
"step": 3870 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.32104009231527136, |
|
"learning_rate": 2.1212453225627482e-08, |
|
"loss": 0.7268, |
|
"step": 3875 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.28886214510321495, |
|
"learning_rate": 1.299230436898613e-08, |
|
"loss": 0.782, |
|
"step": 3880 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.25342207402007316, |
|
"learning_rate": 6.776920322515423e-09, |
|
"loss": 0.6615, |
|
"step": 3885 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.2610844053652662, |
|
"learning_rate": 2.566425706218567e-09, |
|
"loss": 0.6916, |
|
"step": 3890 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.21290106643942017, |
|
"learning_rate": 3.609049415764787e-10, |
|
"loss": 0.7345, |
|
"step": 3895 |
|
}, |
|
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 2056.4641,
      "eval_samples_per_second": 3.371,
      "eval_steps_per_second": 0.843,
      "step": 3898
    },
    {
      "epoch": 1.0,
      "step": 3898,
      "total_flos": 1.0023612803710976e+16,
      "train_loss": 0.7175084921933248,
      "train_runtime": 21380.3358,
      "train_samples_per_second": 2.917,
      "train_steps_per_second": 0.182
    }
  ],
  "logging_steps": 5,
  "max_steps": 3898,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.0023612803710976e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}