{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9995577178239717,
  "eval_steps": 142,
  "global_step": 565,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.0, "grad_norm": 4.588052749633789, "learning_rate": 2.9999999999999997e-05, "loss": 3.3182, "step": 1},
    {"epoch": 0.0, "eval_loss": 3.3362529277801514, "eval_runtime": 14.4233, "eval_samples_per_second": 33.071, "eval_steps_per_second": 8.32, "step": 1},
    {"epoch": 0.0, "grad_norm": 4.520856857299805, "learning_rate": 5.9999999999999995e-05, "loss": 3.2788, "step": 2},
    {"epoch": 0.01, "grad_norm": 4.619396209716797, "learning_rate": 8.999999999999999e-05, "loss": 3.3097, "step": 3},
    {"epoch": 0.01, "grad_norm": 4.416432857513428, "learning_rate": 0.00011999999999999999, "loss": 2.9162, "step": 4},
    {"epoch": 0.01, "grad_norm": 3.6663408279418945, "learning_rate": 0.00015, "loss": 2.0914, "step": 5},
    {"epoch": 0.01, "grad_norm": 2.739701747894287, "learning_rate": 0.00017999999999999998, "loss": 0.9915, "step": 6},
    {"epoch": 0.01, "grad_norm": 1.6202051639556885, "learning_rate": 0.00020999999999999998, "loss": 0.4153, "step": 7},
    {"epoch": 0.01, "grad_norm": 0.975229799747467, "learning_rate": 0.00023999999999999998, "loss": 0.1806, "step": 8},
    {"epoch": 0.02, "grad_norm": 1.136542558670044, "learning_rate": 0.00027, "loss": 0.1403, "step": 9},
    {"epoch": 0.02, "grad_norm": 3.98671555519104, "learning_rate": 0.0003, "loss": 0.386, "step": 10},
    {"epoch": 0.02, "grad_norm": 0.3339874744415283, "learning_rate": 0.0002999997392879692, "loss": 0.1334, "step": 11},
    {"epoch": 0.02, "grad_norm": 0.4813332259654999, "learning_rate": 0.0002999989571527831, "loss": 0.1525, "step": 12},
    {"epoch": 0.02, "grad_norm": 0.3785192370414734, "learning_rate": 0.0002999976535971604, "loss": 0.1408, "step": 13},
    {"epoch": 0.02, "grad_norm": 0.1563730090856552, "learning_rate": 0.00029999582862563263, "loss": 0.137, "step": 14},
    {"epoch": 0.03, "grad_norm": 0.121751569211483, "learning_rate": 0.00029999348224454364, "loss": 0.1371, "step": 15},
    {"epoch": 0.03, "grad_norm": 0.22550074756145477, "learning_rate": 0.0002999906144620498, "loss": 0.1512, "step": 16},
    {"epoch": 0.03, "grad_norm": 0.2235211282968521, "learning_rate": 0.00029998722528811996, "loss": 0.1483, "step": 17},
    {"epoch": 0.03, "grad_norm": 1.4022941589355469, "learning_rate": 0.0002999833147345355, "loss": 0.1124, "step": 18},
    {"epoch": 0.03, "grad_norm": 0.16621588170528412, "learning_rate": 0.0002999788828148901, "loss": 0.1414, "step": 19},
    {"epoch": 0.04, "grad_norm": 0.06893815100193024, "learning_rate": 0.00029997392954458983, "loss": 0.1364, "step": 20},
    {"epoch": 0.04, "grad_norm": 0.3331284821033478, "learning_rate": 0.000299968454940853, "loss": 0.1433, "step": 21},
    {"epoch": 0.04, "grad_norm": 0.1405547559261322, "learning_rate": 0.0002999624590227103, "loss": 0.1291, "step": 22},
    {"epoch": 0.04, "grad_norm": 0.14127376675605774, "learning_rate": 0.00029995594181100437, "loss": 0.1298, "step": 23},
    {"epoch": 0.04, "grad_norm": 0.20128677785396576, "learning_rate": 0.00029994890332839025, "loss": 0.1347, "step": 24},
    {"epoch": 0.04, "grad_norm": 0.31639915704727173, "learning_rate": 0.0002999413435993347, "loss": 0.1288, "step": 25},
    {"epoch": 0.05, "grad_norm": 0.8167548775672913, "learning_rate": 0.00029993326265011667, "loss": 0.1785, "step": 26},
    {"epoch": 0.05, "grad_norm": 0.1112348735332489, "learning_rate": 0.0002999246605088267, "loss": 0.1168, "step": 27},
    {"epoch": 0.05, "grad_norm": 0.06746704876422882, "learning_rate": 0.0002999155372053673, "loss": 0.1238, "step": 28},
    {"epoch": 0.05, "grad_norm": 0.6724908947944641, "learning_rate": 0.0002999058927714525, "loss": 0.2079, "step": 29},
    {"epoch": 0.05, "grad_norm": 0.1440785676240921, "learning_rate": 0.00029989572724060796, "loss": 0.1283, "step": 30},
    {"epoch": 0.05, "grad_norm": 0.11361633986234665, "learning_rate": 0.00029988504064817065, "loss": 0.14, "step": 31},
    {"epoch": 0.06, "grad_norm": 0.10072154551744461, "learning_rate": 0.00029987383303128884, "loss": 0.1389, "step": 32},
    {"epoch": 0.06, "grad_norm": 0.07634437829256058, "learning_rate": 0.00029986210442892213, "loss": 0.1373, "step": 33},
    {"epoch": 0.06, "grad_norm": 0.16817660629749298, "learning_rate": 0.0002998498548818408, "loss": 0.1385, "step": 34},
    {"epoch": 0.06, "grad_norm": 0.11250842362642288, "learning_rate": 0.00029983708443262654, "loss": 0.1389, "step": 35},
    {"epoch": 0.06, "grad_norm": 0.10796725004911423, "learning_rate": 0.0002998237931256712, "loss": 0.1414, "step": 36},
    {"epoch": 0.07, "grad_norm": 0.04835154488682747, "learning_rate": 0.0002998099810071777, "loss": 0.1348, "step": 37},
    {"epoch": 0.07, "grad_norm": 0.29721006751060486, "learning_rate": 0.00029979564812515906, "loss": 0.1374, "step": 38},
    {"epoch": 0.07, "grad_norm": 0.13514482975006104, "learning_rate": 0.0002997807945294387, "loss": 0.1395, "step": 39},
    {"epoch": 0.07, "grad_norm": 0.2045493870973587, "learning_rate": 0.0002997654202716501, "loss": 0.1258, "step": 40},
    {"epoch": 0.07, "grad_norm": 0.17623500525951385, "learning_rate": 0.0002997495254052367, "loss": 0.1399, "step": 41},
    {"epoch": 0.07, "grad_norm": 0.15333342552185059, "learning_rate": 0.00029973310998545157, "loss": 0.1407, "step": 42},
    {"epoch": 0.08, "grad_norm": 0.11854752153158188, "learning_rate": 0.0002997161740693573, "loss": 0.1365, "step": 43},
    {"epoch": 0.08, "grad_norm": 0.34156811237335205, "learning_rate": 0.00029969871771582594, "loss": 0.1064, "step": 44},
    {"epoch": 0.08, "grad_norm": 0.07359682768583298, "learning_rate": 0.0002996807409855385, "loss": 0.1267, "step": 45},
    {"epoch": 0.08, "grad_norm": 0.2271728217601776, "learning_rate": 0.00029966224394098494, "loss": 0.1378, "step": 46},
    {"epoch": 0.08, "grad_norm": 0.21861566603183746, "learning_rate": 0.0002996432266464641, "loss": 0.1415, "step": 47},
    {"epoch": 0.08, "grad_norm": 0.11931619793176651, "learning_rate": 0.00029962368916808306, "loss": 0.1375, "step": 48},
    {"epoch": 0.09, "grad_norm": 0.19349679350852966, "learning_rate": 0.00029960363157375717, "loss": 0.132, "step": 49},
    {"epoch": 0.09, "grad_norm": 0.35250943899154663, "learning_rate": 0.00029958305393320997, "loss": 0.1513, "step": 50},
    {"epoch": 0.09, "grad_norm": 0.10226023942232132, "learning_rate": 0.00029956195631797257, "loss": 0.1332, "step": 51},
    {"epoch": 0.09, "grad_norm": 0.34944722056388855, "learning_rate": 0.00029954033880138364, "loss": 0.1512, "step": 52},
    {"epoch": 0.09, "grad_norm": 0.24920719861984253, "learning_rate": 0.00029951820145858915, "loss": 0.1433, "step": 53},
    {"epoch": 0.1, "grad_norm": 0.5471547842025757, "learning_rate": 0.0002994955443665421, "loss": 0.1453, "step": 54},
    {"epoch": 0.1, "grad_norm": 0.04468518868088722, "learning_rate": 0.00029947236760400215, "loss": 0.1328, "step": 55},
    {"epoch": 0.1, "grad_norm": 0.1944192349910736, "learning_rate": 0.00029944867125153543, "loss": 0.1319, "step": 56},
    {"epoch": 0.1, "grad_norm": 0.10784479230642319, "learning_rate": 0.0002994244553915143, "loss": 0.1394, "step": 57},
    {"epoch": 0.1, "grad_norm": 0.16469427943229675, "learning_rate": 0.0002993997201081169, "loss": 0.1448, "step": 58},
    {"epoch": 0.1, "grad_norm": 0.07228899002075195, "learning_rate": 0.00029937446548732716, "loss": 0.1302, "step": 59},
    {"epoch": 0.11, "grad_norm": 0.17821665108203888, "learning_rate": 0.0002993486916169341, "loss": 0.1365, "step": 60},
    {"epoch": 0.11, "grad_norm": 0.1656705141067505, "learning_rate": 0.0002993223985865318, "loss": 0.1232, "step": 61},
    {"epoch": 0.11, "grad_norm": 0.25225961208343506, "learning_rate": 0.0002992955864875192, "loss": 0.1308, "step": 62},
    {"epoch": 0.11, "grad_norm": 0.26701709628105164, "learning_rate": 0.00029926825541309925, "loss": 0.1126, "step": 63},
    {"epoch": 0.11, "grad_norm": 0.30766233801841736, "learning_rate": 0.0002992404054582793, "loss": 0.1051, "step": 64},
    {"epoch": 0.11, "grad_norm": 1.0783226490020752, "learning_rate": 0.00029921203671987023, "loss": 0.1031, "step": 65},
    {"epoch": 0.12, "grad_norm": 0.797533392906189, "learning_rate": 0.0002991831492964863, "loss": 0.1243, "step": 66},
    {"epoch": 0.12, "grad_norm": 0.3874399662017822, "learning_rate": 0.0002991537432885449, "loss": 0.098, "step": 67},
    {"epoch": 0.12, "grad_norm": 0.5988464951515198, "learning_rate": 0.0002991238187982659, "loss": 0.0899, "step": 68},
    {"epoch": 0.12, "grad_norm": 0.5161219835281372, "learning_rate": 0.00029909337592967173, "loss": 0.0893, "step": 69},
    {"epoch": 0.12, "grad_norm": 0.9815970659255981, "learning_rate": 0.0002990624147885866, "loss": 0.1132, "step": 70},
    {"epoch": 0.13, "grad_norm": 1.014042615890503, "learning_rate": 0.00029903093548263655, "loss": 0.1526, "step": 71},
    {"epoch": 0.13, "grad_norm": 0.4545246958732605, "learning_rate": 0.00029899893812124857, "loss": 0.1303, "step": 72},
    {"epoch": 0.13, "grad_norm": 0.4577489197254181, "learning_rate": 0.00029896642281565067, "loss": 0.0745, "step": 73},
    {"epoch": 0.13, "grad_norm": 0.49255430698394775, "learning_rate": 0.00029893338967887124, "loss": 0.0924, "step": 74},
    {"epoch": 0.13, "grad_norm": 0.39145103096961975, "learning_rate": 0.0002988998388257388, "loss": 0.1121, "step": 75},
    {"epoch": 0.13, "grad_norm": 0.2706741392612457, "learning_rate": 0.00029886577037288147, "loss": 0.0727, "step": 76},
    {"epoch": 0.14, "grad_norm": 0.1395532339811325, "learning_rate": 0.0002988311844387266, "loss": 0.0509, "step": 77},
    {"epoch": 0.14, "grad_norm": 0.4092063307762146, "learning_rate": 0.0002987960811435006, "loss": 0.0785, "step": 78},
    {"epoch": 0.14, "grad_norm": 0.4469921886920929, "learning_rate": 0.000298760460609228, "loss": 0.123, "step": 79},
    {"epoch": 0.14, "grad_norm": 0.7060185670852661, "learning_rate": 0.00029872432295973154, "loss": 0.1112, "step": 80},
    {"epoch": 0.14, "grad_norm": 0.5608384013175964, "learning_rate": 0.00029868766832063154, "loss": 0.1248, "step": 81},
    {"epoch": 0.15, "grad_norm": 0.2733088731765747, "learning_rate": 0.0002986504968193454, "loss": 0.08, "step": 82},
    {"epoch": 0.15, "grad_norm": 0.28215891122817993, "learning_rate": 0.0002986128085850871, "loss": 0.0603, "step": 83},
    {"epoch": 0.15, "grad_norm": 0.28432655334472656, "learning_rate": 0.0002985746037488671, "loss": 0.1094, "step": 84},
    {"epoch": 0.15, "grad_norm": 0.3250674903392792, "learning_rate": 0.00029853588244349154, "loss": 0.0937, "step": 85},
    {"epoch": 0.15, "grad_norm": 0.31528908014297485, "learning_rate": 0.00029849664480356187, "loss": 0.0984, "step": 86},
    {"epoch": 0.15, "grad_norm": 0.4400915503501892, "learning_rate": 0.00029845689096547436, "loss": 0.1054, "step": 87},
    {"epoch": 0.16, "grad_norm": 0.2794625759124756, "learning_rate": 0.0002984166210674198, "loss": 0.103, "step": 88},
    {"epoch": 0.16, "grad_norm": 0.24721817672252655, "learning_rate": 0.00029837583524938287, "loss": 0.0763, "step": 89},
    {"epoch": 0.16, "grad_norm": 0.17728295922279358, "learning_rate": 0.00029833453365314146, "loss": 0.0799, "step": 90},
    {"epoch": 0.16, "grad_norm": 0.42136892676353455, "learning_rate": 0.00029829271642226664, "loss": 0.1157, "step": 91},
    {"epoch": 0.16, "grad_norm": 0.26066917181015015, "learning_rate": 0.0002982503837021218, "loss": 0.0754, "step": 92},
    {"epoch": 0.16, "grad_norm": 0.3443453013896942, "learning_rate": 0.00029820753563986226, "loss": 0.1262, "step": 93},
    {"epoch": 0.17, "grad_norm": 1.1035971641540527, "learning_rate": 0.0002981641723844348, "loss": 0.2149, "step": 94},
    {"epoch": 0.17, "grad_norm": 0.4937790036201477, "learning_rate": 0.00029812029408657695, "loss": 0.0519, "step": 95},
    {"epoch": 0.17, "grad_norm": 0.25491034984588623, "learning_rate": 0.00029807590089881683, "loss": 0.0463, "step": 96},
    {"epoch": 0.17, "grad_norm": 0.8630254864692688, "learning_rate": 0.00029803099297547216, "loss": 0.1097, "step": 97},
    {"epoch": 0.17, "grad_norm": 0.7856675386428833, "learning_rate": 0.0002979855704726502, "loss": 0.1249, "step": 98},
    {"epoch": 0.18, "grad_norm": 0.41131559014320374, "learning_rate": 0.00029793963354824685, "loss": 0.0578, "step": 99},
    {"epoch": 0.18, "grad_norm": 0.2710832357406616, "learning_rate": 0.00029789318236194616, "loss": 0.0695, "step": 100},
    {"epoch": 0.18, "grad_norm": 0.37780898809432983, "learning_rate": 0.0002978462170752199, "loss": 0.1595, "step": 101},
    {"epoch": 0.18, "grad_norm": 0.6756994128227234, "learning_rate": 0.00029779873785132696, "loss": 0.1593, "step": 102},
    {"epoch": 0.18, "grad_norm": 0.3440113067626953, "learning_rate": 0.0002977507448553128, "loss": 0.1084, "step": 103},
    {"epoch": 0.18, "grad_norm": 0.46663540601730347, "learning_rate": 0.0002977022382540087, "loss": 0.1467, "step": 104},
    {"epoch": 0.19, "grad_norm": 0.29806986451148987, "learning_rate": 0.0002976532182160314, "loss": 0.114, "step": 105},
    {"epoch": 0.19, "grad_norm": 0.17679756879806519, "learning_rate": 0.0002976036849117824, "loss": 0.1148, "step": 106},
    {"epoch": 0.19, "grad_norm": 0.17152459919452667, "learning_rate": 0.0002975536385134475, "loss": 0.1135, "step": 107},
    {"epoch": 0.19, "grad_norm": 0.16383178532123566, "learning_rate": 0.00029750307919499595, "loss": 0.0987, "step": 108},
    {"epoch": 0.19, "grad_norm": 0.27941015362739563, "learning_rate": 0.00029745200713217996, "loss": 0.141, "step": 109},
    {"epoch": 0.19, "grad_norm": 0.1865178793668747, "learning_rate": 0.0002974004225025344, "loss": 0.1066, "step": 110},
    {"epoch": 0.2, "grad_norm": 0.09909848123788834, "learning_rate": 0.0002973483254853756, "loss": 0.0829, "step": 111},
    {"epoch": 0.2, "grad_norm": 0.17434453964233398, "learning_rate": 0.0002972957162618011, "loss": 0.0908, "step": 112},
    {"epoch": 0.2, "grad_norm": 0.1075962707400322, "learning_rate": 0.0002972425950146891, "loss": 0.1005, "step": 113},
    {"epoch": 0.2, "grad_norm": 0.23516638576984406, "learning_rate": 0.00029718896192869755, "loss": 0.107, "step": 114},
    {"epoch": 0.2, "grad_norm": 0.1640479862689972, "learning_rate": 0.00029713481719026365, "loss": 0.0947, "step": 115},
    {"epoch": 0.21, "grad_norm": 0.1907346248626709, "learning_rate": 0.00029708016098760315, "loss": 0.0757, "step": 116},
    {"epoch": 0.21, "grad_norm": 0.26523101329803467, "learning_rate": 0.0002970249935107099, "loss": 0.1213, "step": 117},
    {"epoch": 0.21, "grad_norm": 0.4174201190471649, "learning_rate": 0.0002969693149513548, "loss": 0.1036, "step": 118},
    {"epoch": 0.21, "grad_norm": 0.5090858340263367, "learning_rate": 0.00029691312550308546, "loss": 0.1229, "step": 119},
    {"epoch": 0.21, "grad_norm": 0.5807089805603027, "learning_rate": 0.00029685642536122543, "loss": 0.1017, "step": 120},
    {"epoch": 0.21, "grad_norm": 0.20680812001228333, "learning_rate": 0.00029679921472287353, "loss": 0.0656, "step": 121},
    {"epoch": 0.22, "grad_norm": 0.8702337145805359, "learning_rate": 0.0002967414937869031, "loss": 0.2062, "step": 122},
    {"epoch": 0.22, "grad_norm": 0.8191081881523132, "learning_rate": 0.00029668326275396133, "loss": 0.1474, "step": 123},
    {"epoch": 0.22, "grad_norm": 0.3332749307155609, "learning_rate": 0.0002966245218264687, "loss": 0.0991, "step": 124},
    {"epoch": 0.22, "grad_norm": 0.4073905050754547, "learning_rate": 0.000296565271208618, "loss": 0.1113, "step": 125},
    {"epoch": 0.22, "grad_norm": 1.1542918682098389, "learning_rate": 0.00029650551110637393, "loss": 0.1672, "step": 126},
    {"epoch": 0.22, "grad_norm": 1.1717615127563477, "learning_rate": 0.0002964452417274723, "loss": 0.1665, "step": 127},
    {"epoch": 0.23, "grad_norm": 0.24656681716442108, "learning_rate": 0.00029638446328141894, "loss": 0.113, "step": 128},
    {"epoch": 0.23, "grad_norm": 0.12859101593494415, "learning_rate": 0.00029632317597948964, "loss": 0.111, "step": 129},
    {"epoch": 0.23, "grad_norm": 0.0825003981590271, "learning_rate": 0.0002962613800347288, "loss": 0.1235, "step": 130},
    {"epoch": 0.23, "grad_norm": 0.14479093253612518, "learning_rate": 0.0002961990756619491, "loss": 0.1031, "step": 131},
    {"epoch": 0.23, "grad_norm": 0.2117856740951538, "learning_rate": 0.0002961362630777305, "loss": 0.0995, "step": 132},
    {"epoch": 0.24, "grad_norm": 0.1228349581360817, "learning_rate": 0.00029607294250041965, "loss": 0.0804, "step": 133},
    {"epoch": 0.24, "grad_norm": 0.18281131982803345, "learning_rate": 0.000296009114150129, "loss": 0.0843, "step": 134},
    {"epoch": 0.24, "grad_norm": 0.2280908077955246, "learning_rate": 0.0002959447782487361, "loss": 0.1361, "step": 135},
    {"epoch": 0.24, "grad_norm": 0.21089224517345428, "learning_rate": 0.0002958799350198829, "loss": 0.136, "step": 136},
    {"epoch": 0.24, "grad_norm": 0.4448394477367401, "learning_rate": 0.00029581458468897485, "loss": 0.1293, "step": 137},
    {"epoch": 0.24, "grad_norm": 0.35630327463150024, "learning_rate": 0.0002957487274831803, "loss": 0.0892, "step": 138},
    {"epoch": 0.25, "grad_norm": 0.3235824406147003, "learning_rate": 0.00029568236363142924, "loss": 0.0862, "step": 139},
    {"epoch": 0.25, "grad_norm": 0.5782188177108765, "learning_rate": 0.0002956154933644133, "loss": 0.1067, "step": 140},
    {"epoch": 0.25, "grad_norm": 0.0916428491473198, "learning_rate": 0.00029554811691458405, "loss": 0.0717, "step": 141},
    {"epoch": 0.25, "grad_norm": 0.10682029277086258, "learning_rate": 0.00029548023451615295, "loss": 0.0729, "step": 142},
    {"epoch": 0.25, "eval_loss": 0.09042291343212128, "eval_runtime": 14.7658, "eval_samples_per_second": 32.304, "eval_steps_per_second": 8.127, "step": 142},
    {"epoch": 0.25, "grad_norm": 0.10448458790779114, "learning_rate": 0.00029541184640509015, "loss": 0.1013, "step": 143},
    {"epoch": 0.25, "grad_norm": 0.20653600990772247, "learning_rate": 0.00029534295281912355, "loss": 0.1109, "step": 144},
    {"epoch": 0.26, "grad_norm": 0.09114749729633331, "learning_rate": 0.00029527355399773845, "loss": 0.098, "step": 145},
    {"epoch": 0.26, "grad_norm": 0.13702392578125, "learning_rate": 0.0002952036501821762, "loss": 0.0434, "step": 146},
    {"epoch": 0.26, "grad_norm": 0.21909640729427338, "learning_rate": 0.00029513324161543366, "loss": 0.1072, "step": 147},
    {"epoch": 0.26, "grad_norm": 0.1765926033258438, "learning_rate": 0.00029506232854226237, "loss": 0.0912, "step": 148},
    {"epoch": 0.26, "grad_norm": 0.26851925253868103, "learning_rate": 0.00029499091120916755, "loss": 0.1134, "step": 149},
    {"epoch": 0.27, "grad_norm": 0.15720008313655853, "learning_rate": 0.0002949189898644072, "loss": 0.0953, "step": 150},
    {"epoch": 0.27, "grad_norm": 0.31068509817123413, "learning_rate": 0.0002948465647579916, "loss": 0.1179, "step": 151},
    {"epoch": 0.27, "grad_norm": 0.3737366497516632, "learning_rate": 0.00029477363614168194, "loss": 0.0745, "step": 152},
    {"epoch": 0.27, "grad_norm": 0.295796275138855, "learning_rate": 0.0002947002042689898, "loss": 0.1448, "step": 153},
    {"epoch": 0.27, "grad_norm": 0.21946462988853455, "learning_rate": 0.0002946262693951762, "loss": 0.0938, "step": 154},
    {"epoch": 0.27, "grad_norm": 0.1442556530237198, "learning_rate": 0.00029455183177725053, "loss": 0.0778, "step": 155},
    {"epoch": 0.28, "grad_norm": 0.15714137256145477, "learning_rate": 0.00029447689167396996, "loss": 0.1192, "step": 156},
    {"epoch": 0.28, "grad_norm": 0.1749090850353241, "learning_rate": 0.0002944014493458383, "loss": 0.1065, "step": 157},
    {"epoch": 0.28, "grad_norm": 0.4440750777721405, "learning_rate": 0.0002943255050551051, "loss": 0.1143, "step": 158},
    {"epoch": 0.28, "grad_norm": 0.3883216083049774, "learning_rate": 0.0002942490590657651, "loss": 0.1257, "step": 159},
    {"epoch": 0.28, "grad_norm": 0.1770515888929367, "learning_rate": 0.00029417211164355664, "loss": 0.0917, "step": 160},
    {"epoch": 0.28, "grad_norm": 0.13733014464378357, "learning_rate": 0.0002940946630559613, "loss": 0.0694, "step": 161},
    {"epoch": 0.29, "grad_norm": 0.10783812403678894, "learning_rate": 0.0002940167135722029, "loss": 0.053, "step": 162},
    {"epoch": 0.29, "grad_norm": 0.4282841086387634, "learning_rate": 0.0002939382634632463, "loss": 0.1514, "step": 163},
    {"epoch": 0.29, "grad_norm": 0.2022620588541031, "learning_rate": 0.00029385931300179673, "loss": 0.093, "step": 164},
    {"epoch": 0.29, "grad_norm": 0.09893293678760529, "learning_rate": 0.0002937798624622985, "loss": 0.0482, "step": 165},
    {"epoch": 0.29, "grad_norm": 0.24493345618247986, "learning_rate": 0.0002936999121209346, "loss": 0.1104, "step": 166},
    {"epoch": 0.3, "grad_norm": 0.558118999004364, "learning_rate": 0.0002936194622556251, "loss": 0.1593, "step": 167},
    {"epoch": 0.3, "grad_norm": 0.30332863330841064, "learning_rate": 0.00029353851314602674, "loss": 0.1431, "step": 168},
    {"epoch": 0.3, "grad_norm": 0.22926372289657593, "learning_rate": 0.00029345706507353153, "loss": 0.0903, "step": 169},
    {"epoch": 0.3, "grad_norm": 0.1272840052843094, "learning_rate": 0.0002933751183212661, "loss": 0.0581, "step": 170},
    {"epoch": 0.3, "grad_norm": 0.14229409396648407, "learning_rate": 0.0002932926731740905, "loss": 0.0786, "step": 171},
    {"epoch": 0.3, "grad_norm": 0.15394841134548187, "learning_rate": 0.00029320972991859725, "loss": 0.0891, "step": 172},
    {"epoch": 0.31, "grad_norm": 0.14996570348739624, "learning_rate": 0.00029312628884311045, "loss": 0.0697, "step": 173},
    {"epoch": 0.31, "grad_norm": 0.17957860231399536, "learning_rate": 0.0002930423502376846, "loss": 0.0597, "step": 174},
    {"epoch": 0.31, "grad_norm": 0.2494930922985077, "learning_rate": 0.00029295791439410383, "loss": 0.1173, "step": 175},
    {"epoch": 0.31, "grad_norm": 0.25140491127967834, "learning_rate": 0.0002928729816058807, "loss": 0.1015, "step": 176},
    {"epoch": 0.31, "grad_norm": 0.23032979667186737, "learning_rate": 0.00029278755216825505, "loss": 0.1271, "step": 177},
    {"epoch": 0.31, "grad_norm": 0.18228159844875336, "learning_rate": 0.0002927016263781935, "loss": 0.0758, "step": 178},
    {"epoch": 0.32, "grad_norm": 0.18998126685619354, "learning_rate": 0.0002926152045343877, "loss": 0.0658, "step": 179},
    {"epoch": 0.32, "grad_norm": 0.2079799771308899, "learning_rate": 0.00029252828693725403, "loss": 0.1376, "step": 180},
    {"epoch": 0.32, "grad_norm": 0.20679551362991333, "learning_rate": 0.00029244087388893185, "loss": 0.0989, "step": 181},
    {"epoch": 0.32, "grad_norm": 0.13382777571678162, "learning_rate": 0.000292352965693283, "loss": 0.0739, "step": 182},
    {"epoch": 0.32, "grad_norm": 0.1363653689622879, "learning_rate": 0.00029226456265589045, "loss": 0.0781, "step": 183},
    {"epoch": 0.33, "grad_norm": 0.21474190056324005, "learning_rate": 0.00029217566508405737, "loss": 0.0994, "step": 184},
    {"epoch": 0.33, "grad_norm": 0.1632685363292694, "learning_rate": 0.000292086273286806, "loss": 0.0938, "step": 185},
    {"epoch": 0.33, "grad_norm": 0.18359023332595825, "learning_rate": 0.0002919963875748765, "loss": 0.0837, "step": 186},
    {"epoch": 0.33, "grad_norm": 0.21232351660728455, "learning_rate": 0.000291906008260726, "loss": 0.1021, "step": 187},
    {"epoch": 0.33, "grad_norm": 0.1560080200433731, "learning_rate": 0.0002918151356585276, "loss": 0.0775, "step": 188},
    {"epoch": 0.33, "grad_norm": 0.206360325217247, "learning_rate": 0.00029172377008416893, "loss": 0.0859, "step": 189},
    {"epoch": 0.34, "grad_norm": 0.16424915194511414, "learning_rate": 0.0002916319118552515, "loss": 0.1071, "step": 190},
    {"epoch": 0.34, "grad_norm": 0.15049318969249725, "learning_rate": 0.00029153956129108913, "loss": 0.0837, "step": 191},
    {"epoch": 0.34, "grad_norm": 0.08157264441251755, "learning_rate": 0.0002914467187127073, "loss": 0.063, "step": 192},
    {"epoch": 0.34, "grad_norm": 0.2039349228143692, "learning_rate": 0.00029135338444284166, "loss": 0.1077, "step": 193},
    {"epoch": 0.34, "grad_norm": 0.1816401183605194, "learning_rate": 0.00029125955880593705, "loss": 0.1035, "step": 194},
    {"epoch": 0.34, "grad_norm": 0.13115471601486206, "learning_rate": 0.0002911652421281465, "loss": 0.0862, "step": 195},
    {"epoch": 0.35, "grad_norm": 0.20337167382240295, "learning_rate": 0.00029107043473732995, "loss": 0.0724, "step": 196},
    {"epoch": 0.35, "grad_norm": 0.12583452463150024, "learning_rate": 0.000290975136963053, "loss": 0.0701, "step": 197},
    {"epoch": 0.35, "grad_norm": 0.4595123827457428, "learning_rate": 0.0002908793491365861, "loss": 0.1763, "step": 198},
    {"epoch": 0.35, "grad_norm": 0.17089878022670746, "learning_rate": 0.00029078307159090294, "loss": 0.0478, "step": 199},
    {"epoch": 0.35, "grad_norm": 0.22770017385482788, "learning_rate": 0.00029068630466067995, "loss": 0.0551, "step": 200},
    {"epoch": 0.36, "grad_norm": 0.16812334954738617, "learning_rate": 0.00029058904868229426, "loss": 0.0829, "step": 201},
    {"epoch": 0.36, "grad_norm": 0.15563331544399261, "learning_rate": 0.0002904913039938234, "loss": 0.0551, "step": 202},
    {"epoch": 0.36, "grad_norm": 0.14948880672454834, "learning_rate": 0.00029039307093504355, "loss": 0.1255, "step": 203},
    {"epoch": 0.36, "grad_norm": 0.4740292429924011, "learning_rate": 0.0002902943498474286, "loss": 0.1865, "step": 204},
    {"epoch": 0.36, "grad_norm": 0.2227841019630432, "learning_rate": 0.00029019514107414887, "loss": 0.0801, "step": 205},
    {"epoch": 0.36, "grad_norm": 0.3277839422225952, "learning_rate": 0.00029009544496006996, "loss": 0.0785, "step": 206},
    {"epoch": 0.37, "grad_norm": 0.30582112073898315, "learning_rate": 0.0002899952618517515, "loss": 0.0802, "step": 207},
    {"epoch": 0.37, "grad_norm": 0.2919937074184418, "learning_rate": 0.00028989459209744617, "loss": 0.0533, "step": 208},
    {"epoch": 0.37, "grad_norm": 0.31071627140045166, "learning_rate": 0.00028979343604709816, "loss": 0.1474, "step": 209},
    {"epoch": 0.37, "grad_norm": 0.14612245559692383, "learning_rate": 0.000289691794052342, "loss": 0.1118, "step": 210},
    {"epoch": 0.37, "grad_norm": 0.2461383044719696, "learning_rate": 0.0002895896664665017, "loss": 0.155, "step": 211},
    {"epoch": 0.38, "grad_norm": 0.2300606220960617, "learning_rate": 0.0002894870536445891, "loss": 0.1023, "step": 212},
    {"epoch": 0.38, "grad_norm": 0.3330621123313904, "learning_rate": 0.0002893839559433028, "loss": 0.1851, "step": 213},
    {"epoch": 0.38, "grad_norm": 0.08405052870512009, "learning_rate": 0.00028928037372102694, "loss": 0.1162, "step": 214},
    {"epoch": 0.38, "grad_norm": 0.08986380696296692, "learning_rate": 0.00028917630733783, "loss": 0.1033, "step": 215},
    {"epoch": 0.38, "grad_norm": 0.08752616494894028, "learning_rate": 0.00028907175715546334, "loss": 0.1081, "step": 216},
    {"epoch": 0.38, "grad_norm": 0.15312182903289795, "learning_rate": 0.00028896672353736027, "loss": 0.1084, "step": 217},
    {"epoch": 0.39, "grad_norm": 0.1530596911907196, "learning_rate": 0.00028886120684863437, "loss": 0.1143, "step": 218},
    {"epoch": 0.39, "grad_norm": 0.19303447008132935, "learning_rate": 0.00028875520745607865, "loss": 0.1322, "step": 219},
    {"epoch": 0.39, "grad_norm": 0.2029002606868744, "learning_rate": 0.00028864872572816405, "loss": 0.1, "step": 220},
    {"epoch": 0.39, "grad_norm": 0.0897076427936554, "learning_rate": 0.00028854176203503806, "loss": 0.0964, "step": 221},
    {"epoch": 0.39, "grad_norm": 0.14917291700839996, "learning_rate": 0.00028843431674852363, "loss": 0.078, "step": 222},
    {"epoch": 0.39, "grad_norm": 0.1606171429157257, "learning_rate": 0.00028832639024211785, "loss": 0.0844, "step": 223},
    {"epoch": 0.4, "grad_norm": 0.15833866596221924, "learning_rate": 0.0002882179828909905, "loss": 0.103, "step": 224},
    {"epoch": 0.4, "grad_norm": 0.2102007120847702, "learning_rate": 0.00028810909507198304, "loss": 0.0783, "step": 225},
    {"epoch": 0.4, "grad_norm": 0.14001865684986115, "learning_rate": 0.00028799972716360693, "loss": 0.0999, "step": 226},
    {"epoch": 0.4, "grad_norm": 0.07050840556621552, "learning_rate": 0.00028788987954604255, "loss": 0.0504, "step": 227},
    {"epoch": 0.4, "grad_norm": 0.1809457242488861, "learning_rate": 0.0002877795526011379, "loss": 0.063, "step": 228},
    {"epoch": 0.41, "grad_norm": 0.19804933667182922, "learning_rate": 0.00028766874671240706, "loss": 0.0819, "step": 229},
    {"epoch": 0.41, "grad_norm": 0.8398628830909729, "learning_rate": 0.0002875574622650291, "loss": 0.0495, "step": 230},
    {"epoch": 0.41, "grad_norm": 0.28259506821632385, "learning_rate": 0.0002874456996458466, "loss": 0.147, "step": 231},
    {"epoch": 0.41, "grad_norm": 0.5770221948623657, "learning_rate": 0.00028733345924336444, "loss": 0.0941, "step": 232},
    {"epoch": 0.41, "grad_norm": 0.2959941625595093, "learning_rate": 0.0002872207414477482, "loss": 0.15, "step": 233},
    {"epoch": 0.41, "grad_norm": 0.41449636220932007, "learning_rate": 0.0002871075466508229, "loss": 0.1244, "step": 234},
    {"epoch": 0.42, "grad_norm": 0.20178668200969696, "learning_rate": 0.000286993875246072, "loss": 0.1241, "step": 235},
    {"epoch": 0.42, "grad_norm": 0.2657439708709717, "learning_rate": 0.0002868797276286355, "loss": 0.108, "step": 236},
    {"epoch": 0.42, "grad_norm": 0.26215022802352905, "learning_rate": 0.0002867651041953087, "loss": 0.0814, "step": 237},
    {"epoch": 0.42, "grad_norm": 0.17194584012031555, "learning_rate": 0.00028665000534454116, "loss": 0.0937, "step": 238},
    {"epoch": 0.42, "grad_norm": 0.14481855928897858, "learning_rate": 0.00028653443147643495, "loss": 0.0866, "step": 239},
    {"epoch": 0.42, "grad_norm": 0.2787633240222931, "learning_rate": 0.00028641838299274335, "loss": 0.0711, "step": 240},
    {"epoch": 0.43, "grad_norm": 0.20350907742977142, "learning_rate": 0.0002863018602968695, "loss": 0.146, "step": 241},
    {"epoch": 0.43, "grad_norm": 0.29709184169769287, "learning_rate": 0.0002861848637938649, "loss": 0.1273, "step": 242},
    {"epoch": 0.43, "grad_norm": 0.15158367156982422, "learning_rate": 0.00028606739389042834, "loss": 0.0778, "step": 243},
    {"epoch": 0.43, "grad_norm": 0.13614635169506073, "learning_rate": 0.0002859494509949039, "loss": 0.0609, "step": 244},
    {"epoch": 0.43, "grad_norm": 0.20077449083328247, "learning_rate": 0.00028583103551728004, "loss": 0.097, "step": 245},
    {"epoch": 0.44, "grad_norm": 0.08916395157575607, "learning_rate": 0.00028571214786918806, "loss": 0.0704, "step": 246},
    {"epoch": 0.44, "grad_norm": 0.18170690536499023, "learning_rate": 0.00028559278846390033, "loss": 0.0662, "step": 247},
    {"epoch": 0.44, "grad_norm": 0.20803312957286835, "learning_rate": 0.00028547295771632936, "loss": 0.0683, "step": 248},
    {"epoch": 0.44, "grad_norm": 0.40586671233177185, "learning_rate": 0.0002853526560430261, "loss": 0.0909, "step": 249},
    {"epoch": 0.44, "grad_norm": 0.16656708717346191, "learning_rate": 0.0002852318838621784, "loss": 0.0793, "step": 250},
    {"epoch": 0.44, "grad_norm": 0.24462048709392548, "learning_rate": 0.00028511064159360977, "loss": 0.12, "step": 251},
    {"epoch": 0.45, "grad_norm": 0.32009223103523254, "learning_rate": 0.00028498892965877776, "loss": 0.1005, "step": 252},
    {"epoch": 0.45, "grad_norm": 0.17075763642787933, "learning_rate": 0.0002848667484807726, "loss": 0.088, "step": 253},
    {"epoch": 0.45, "grad_norm": 0.4186219573020935, "learning_rate": 0.00028474409848431556, "loss": 0.1242, "step": 254},
    {"epoch": 0.45, "grad_norm": 0.2534870505332947, "learning_rate": 0.0002846209800957579, "loss": 0.0672, "step": 255},
    {"epoch": 0.45, "grad_norm": 0.22438766062259674, "learning_rate": 0.00028449739374307876, "loss": 0.0628, "step": 256},
    {"epoch": 0.45, "grad_norm": 0.3011839687824249, "learning_rate": 0.00028437333985588414, "loss": 0.0731, "step": 257},
    {"epoch": 0.46, "grad_norm": 0.3355525732040405, "learning_rate": 0.00028424881886540525, "loss": 0.1556, "step": 258},
    {"epoch": 0.46, "grad_norm": 0.3344305455684662, "learning_rate": 0.00028412383120449705, "loss": 0.1138, "step": 259},
    {"epoch": 0.46, "grad_norm": 0.12889593839645386, "learning_rate": 0.00028399837730763667, "loss": 0.0386, "step": 260},
    {"epoch": 0.46, "grad_norm": 0.1852964162826538, "learning_rate": 0.000283872457610922, "loss": 0.0777, "step": 261},
    {"epoch": 0.46, "grad_norm": 0.15753400325775146, "learning_rate": 0.00028374607255207007, "loss": 0.0519, "step": 262},
    {"epoch": 0.47, "grad_norm": 0.33515694737434387, "learning_rate": 0.00028361922257041575, "loss": 0.1075, "step": 263},
    {"epoch": 0.47, "grad_norm": 0.4648870825767517, "learning_rate": 0.00028349190810690974, "loss": 0.1426, "step": 264},
    {"epoch": 0.47, "grad_norm": 0.47806718945503235, "learning_rate": 0.0002833641296041176, "loss": 0.1695, "step": 265},
    {"epoch": 0.47, "grad_norm": 0.13909588754177094, "learning_rate": 0.000283235887506218, "loss": 0.0683, "step": 266},
    {"epoch": 0.47, "grad_norm": 0.19695843756198883, "learning_rate": 0.0002831071822590009, "loss": 0.072, "step": 267},
    {"epoch": 0.47, "grad_norm": 0.10471002012491226, "learning_rate": 0.00028297801430986647, "loss": 0.0725, "step": 268},
    {"epoch": 0.48, "grad_norm": 0.11718752980232239, "learning_rate": 0.0002828483841078232, "loss": 0.0799, "step": 269},
    {"epoch": 0.48, "grad_norm": 0.18288259208202362, "learning_rate": 0.0002827182921034865, "loss": 0.1348, "step": 270},
    {"epoch": 0.48, "grad_norm": 0.13153032958507538, "learning_rate": 0.000282587738749077, "loss": 0.0865, "step": 271},
    {"epoch": 0.48, "grad_norm": 0.2975251376628876, "learning_rate": 0.00028245672449841915, "loss": 0.118, "step": 272},
    {"epoch": 0.48, "grad_norm": 0.23405657708644867, "learning_rate": 0.00028232524980693945, "loss": 0.1012, "step": 273},
    {"epoch": 0.48, "grad_norm": 0.15189243853092194, "learning_rate": 0.000282193315131665, "loss": 0.1089, "step": 274},
    {"epoch": 0.49, "grad_norm": 0.11026138812303543, "learning_rate": 0.0002820609209312219, "loss": 0.0626, "step": 275},
    {"epoch": 0.49, "grad_norm": 0.11485940217971802, "learning_rate": 0.0002819280676658337, "loss": 0.0767, "step": 276},
    {"epoch": 0.49, "grad_norm": 0.3087170422077179, "learning_rate": 0.0002817947557973196, "loss": 0.0892, "step": 277},
    {"epoch": 0.49, "grad_norm": 0.07682602852582932, "learning_rate": 0.00028166098578909313, "loss": 0.0467, "step": 278},
    {"epoch": 0.49, "grad_norm": 0.34041598439216614, "learning_rate": 0.0002815267581061602, "loss": 0.0758, "step": 279},
    {"epoch": 0.5, "grad_norm": 0.24435921013355255, "learning_rate": 0.0002813920732151177, "loss": 0.0772, "step": 280},
    {"epoch": 0.5, "grad_norm": 0.2376328855752945, "learning_rate": 0.0002812569315841521, "loss": 0.0935, "step": 281},
    {"epoch": 0.5, "grad_norm": 0.0582437627017498, "learning_rate": 0.0002811213336830373, "loss": 0.0224, "step": 282},
    {"epoch": 0.5, "grad_norm": 0.19927099347114563, "learning_rate": 0.00028098527998313334, "loss": 0.1243, "step": 283},
    {"epoch": 0.5, "grad_norm": 0.15403445065021515, "learning_rate": 0.00028084877095738473, "loss": 0.0278, "step": 284},
    {"epoch": 0.5, "eval_loss": 0.078863725066185, "eval_runtime": 14.7476, "eval_samples_per_second": 32.344, "eval_steps_per_second": 8.137, "step": 284},
    {"epoch": 0.5, "grad_norm": 0.240775465965271, "learning_rate": 0.0002807118070803187, "loss": 0.0359, "step": 285},
    {"epoch": 0.51, "grad_norm": 0.38222241401672363, "learning_rate": 0.0002805743888280437, "loss": 0.1152, "step": 286},
    {"epoch": 0.51, "grad_norm": 0.20165561139583588, "learning_rate": 0.0002804365166782476, "loss": 0.0426, "step": 287},
    {"epoch": 0.51, "grad_norm": 0.4264529049396515, "learning_rate": 0.00028029819111019614, "loss": 0.1029, "step": 288},
    {"epoch": 0.51, "grad_norm": 0.22892819344997406, "learning_rate": 0.00028015941260473113, "loss": 0.0956, "step": 289},
    {"epoch": 0.51, "grad_norm": 0.29828211665153503, "learning_rate": 0.00028002018164426893, "loss": 0.0788, "step": 290},
    {"epoch": 0.51, "grad_norm": 0.19386076927185059, "learning_rate": 0.00027988049871279874, "loss": 0.0889, "step": 291},
    {"epoch": 0.52, "grad_norm": 0.30981379747390747, "learning_rate": 0.0002797403642958808, "loss": 0.1692, "step": 292},
    {"epoch": 0.52, "grad_norm": 0.36764079332351685, "learning_rate": 0.00027959977888064477, "loss": 0.0837, "step": 293},
    {"epoch": 0.52, "grad_norm": 0.1701124757528305, "learning_rate": 0.00027945874295578826, "loss": 0.1026, "step": 294},
    {"epoch": 0.52, "grad_norm": 0.11988788843154907, "learning_rate": 0.0002793172570115746, "loss": 0.0993, "step": 295},
    {"epoch": 0.52, "grad_norm": 0.15950341522693634, "learning_rate": 0.00027917532153983176, "loss": 0.0597, "step": 296},
    {"epoch": 0.53, "grad_norm": 0.18084204196929932, "learning_rate": 0.0002790329370339501, "loss": 0.0776, "step": 297},
    {"epoch": 0.53, "grad_norm": 0.1948510855436325, "learning_rate": 0.000278890103988881, "loss": 0.0899, "step": 298},
    {"epoch": 0.53, "grad_norm": 0.16357307136058807, "learning_rate": 0.0002787468229011351, "loss": 0.109, "step": 299},
    {"epoch": 0.53, "grad_norm": 0.15433883666992188, "learning_rate": 0.0002786030942687805, "loss": 0.0884, "step": 300},
    {"epoch": 0.53, "grad_norm": 0.11737463623285294, "learning_rate": 0.00027845891859144085, "loss": 0.0868, "step": 301},
    {"epoch": 0.53, "grad_norm": 0.1610182225704193, "learning_rate": 0.000278314296370294, "loss": 0.1315, "step": 302},
    {"epoch": 0.54, "grad_norm": 0.1955747902393341, "learning_rate": 0.00027816922810806996, "loss": 0.0893, "step": 303},
    {"epoch": 0.54, "grad_norm": 0.25150609016418457, "learning_rate": 0.0002780237143090493, "loss": 0.1179, "step": 304},
    {"epoch": 0.54, "grad_norm": 0.24072997272014618, "learning_rate": 0.0002778777554790614, "loss": 0.0967, "step": 305},
    {"epoch": 0.54, "grad_norm": 0.2026025652885437, "learning_rate": 0.00027773135212548245, "loss": 0.0809, "step": 306},
    {"epoch": 0.54, "grad_norm": 0.17768602073192596, "learning_rate": 0.000277584504757234, "loss": 0.0726, "step": 307},
    {"epoch": 0.54, "grad_norm": 0.22531519830226898, "learning_rate": 0.00027743721388478115, "loss": 0.0797, "step": 308},
    {"epoch": 0.55, "grad_norm": 0.22690407931804657, "learning_rate": 0.00027728948002013053, "loss": 0.068, "step": 309},
    {"epoch": 0.55, "grad_norm": 0.3111308217048645, "learning_rate": 0.00027714130367682875, "loss": 0.1222, "step": 310},
    {"epoch": 0.55, "grad_norm": 0.31476208567619324, "learning_rate": 0.0002769926853699606, "loss": 0.123, "step": 311},
    {"epoch": 0.55, "grad_norm": 0.19214238226413727, "learning_rate": 0.0002768436256161471, "loss": 0.0778, "step": 312},
    {"epoch": 0.55, "grad_norm": 0.21092087030410767, "learning_rate": 0.0002766941249335439, "loss": 0.0908, "step": 313},
    {"epoch": 0.56, "grad_norm": 0.24106797575950623, "learning_rate": 0.00027654418384183925, "loss": 0.1054, "step": 314},
    {"epoch": 0.56, "grad_norm": 0.24553944170475006, "learning_rate": 0.0002763938028622526, "loss": 0.0687, "step": 315},
    {"epoch": 0.56, "grad_norm": 0.18032313883304596, "learning_rate": 0.0002762429825175323, "loss": 0.0984, "step": 316},
    {"epoch": 0.56, "grad_norm": 0.2341250777244568, "learning_rate": 0.000276091723331954, "loss": 0.0861, "step": 317},
    {"epoch": 0.56, "grad_norm": 0.23054689168930054, "learning_rate": 0.000275940025831319, "loss": 0.1023, "step": 318},
    {"epoch": 0.56, "grad_norm": 0.15571041405200958, "learning_rate": 0.000275787890542952, "loss": 0.061, "step": 319},
    {"epoch": 0.57, "grad_norm": 0.16762854158878326, "learning_rate": 0.0002756353179956998, "loss": 0.1124, "step": 320},
    {"epoch": 0.57, "grad_norm": 0.16507089138031006, "learning_rate": 0.000275482308719929, "loss": 0.1121, "step": 321},
    {"epoch": 0.57, "grad_norm": 0.166361466050148, "learning_rate": 0.00027532886324752433, "loss": 0.1243, "step": 322},
    {"epoch": 0.57, "grad_norm": 0.21798831224441528, "learning_rate": 0.000275174982111887, "loss": 0.1074, "step": 323},
    {"epoch": 0.57, "grad_norm": 0.19063718616962433, "learning_rate": 0.0002750206658479324, "loss": 0.1058, "step": 324},
    {"epoch": 0.57, "grad_norm": 0.20267756283283234, "learning_rate": 0.00027486591499208867, "loss": 0.0875, "step": 325},
    {"epoch": 0.58, "grad_norm": 0.11513421684503555, "learning_rate": 0.0002747107300822946, "loss": 0.0674, "step": 326},
    {"epoch": 0.58, "grad_norm": 0.16988199949264526, "learning_rate": 0.0002745551116579978, "loss": 0.089, "step": 327},
    {"epoch": 0.58, "grad_norm": 0.20574574172496796, "learning_rate": 0.0002743990602601529, "loss": 0.0906, "step": 328},
    {"epoch": 0.58, "grad_norm": 0.14412453770637512, "learning_rate": 0.00027424257643121966, "loss": 0.0996, "step": 329},
    {"epoch": 0.58, "grad_norm": 0.1305454671382904, "learning_rate": 0.00027408566071516084, "loss": 0.0636, "step": 330},
    {"epoch": 0.59, "grad_norm": 0.18056617677211761, "learning_rate": 0.00027392831365744073, "loss": 0.1004, "step": 331},
    {"epoch": 0.59, "grad_norm": 0.15762409567832947, "learning_rate": 0.00027377053580502297, "loss": 0.0938, "step": 332},
    {"epoch": 0.59, "grad_norm": 0.2165631651878357, "learning_rate": 0.00027361232770636856, "loss": 0.0829, "step": 333},
    {"epoch": 0.59, "grad_norm": 0.21345216035842896, "learning_rate": 0.0002734536899114343, "loss": 0.1053, "step": 334},
    {"epoch": 0.59, "grad_norm": 0.22907692193984985, "learning_rate": 0.00027329462297167066, "loss": 0.1451, "step": 335},
    {"epoch": 0.59, "grad_norm": 0.24929089844226837, "learning_rate": 0.0002731351274400198, "loss": 0.0909, "step": 336},
    {"epoch": 0.6, "grad_norm": 0.13142186403274536, "learning_rate": 0.00027297520387091376, "loss": 0.0523, "step": 337},
    {"epoch": 0.6, "grad_norm": 0.27698859572410583, "learning_rate": 0.0002728148528202725, "loss": 0.0865, "step": 338},
    {"epoch": 0.6, "grad_norm": 0.2599867880344391, "learning_rate": 0.000272654074845502, "loss": 0.0654, "step": 339},
    {"epoch": 0.6, "grad_norm": 0.22103819251060486, "learning_rate": 0.0002724928705054924, "loss": 0.1108, "step": 340},
    {"epoch": 0.6, "grad_norm": 0.2073899507522583, "learning_rate": 0.0002723312403606157, "loss": 0.0928, "step": 341},
    {"epoch": 0.61, "grad_norm": 0.23784543573856354, "learning_rate": 0.00027216918497272426, "loss": 0.095, "step": 342},
    {"epoch": 0.61, "grad_norm": 0.16584208607673645, "learning_rate": 0.0002720067049051486, "loss": 0.0521, "step": 343},
    {"epoch": 0.61, "grad_norm": 0.1800609678030014, "learning_rate": 0.0002718438007226955, "loss": 0.0737, "step": 344},
    {"epoch": 0.61, "grad_norm": 0.1386508196592331, "learning_rate": 0.0002716804729916461, "loss": 0.0522, "step": 345},
    {"epoch": 0.61, "grad_norm": 0.22217071056365967, "learning_rate": 0.0002715167222797537, "loss": 0.1045, "step": 346},
    {"epoch": 0.61, "grad_norm": 0.26573020219802856, "learning_rate": 0.0002713525491562421, "loss": 0.0719, "step": 347},
    {"epoch": 0.62, "grad_norm": 0.20932160317897797, "learning_rate": 0.00027118795419180336, "loss": 0.1289, "step": 348},
    {"epoch": 0.62, "grad_norm": 0.19539090991020203, "learning_rate": 0.000271022937958596, "loss": 0.0606, "step": 349},
    {"epoch": 0.62, "grad_norm": 0.15271329879760742, "learning_rate": 0.00027085750103024295, "loss": 0.0343, "step": 350},
    {"epoch": 0.62, "grad_norm": 0.25894200801849365, "learning_rate": 0.00027069164398182944, "loss": 0.0762, "step": 351},
    {"epoch": 0.62, "grad_norm": 0.16486695408821106, "learning_rate": 0.00027052536738990125, "loss": 0.0618, "step": 352},
    {"epoch": 0.62, "grad_norm": 0.24119453132152557, "learning_rate": 0.00027035867183246244, "loss": 0.1013, "step": 353},
    {"epoch": 0.63, "grad_norm": 0.35628098249435425, "learning_rate": 0.00027019155788897355, "loss": 0.0878, "step": 354},
    {"epoch": 0.63, "grad_norm": 0.14005188643932343, "learning_rate": 0.0002700240261403494, "loss": 0.0432, "step": 355},
    {"epoch": 0.63, "grad_norm": 0.13526731729507446, "learning_rate": 0.0002698560771689572, "loss": 0.0513, "step": 356},
    {"epoch": 0.63, "grad_norm": 0.2159578949213028, "learning_rate": 0.0002696877115586146, "loss": 0.0831, "step": 357},
    {"epoch": 0.63, "grad_norm": 0.26970624923706055, "learning_rate": 0.00026951892989458744, "loss": 0.1336, "step": 358},
    {"epoch": 0.64, "grad_norm": 0.17370541393756866, "learning_rate": 0.00026934973276358787, "loss": 0.073, "step": 359},
    {"epoch": 0.64, "grad_norm": 0.19606231153011322, "learning_rate": 0.0002691801207537722, "loss": 0.0718, "step": 360},
    {"epoch": 0.64, "grad_norm": 0.2545556426048279, "learning_rate": 0.0002690100944547391, "loss": 0.1326, "step": 361},
    {"epoch": 0.64, "grad_norm": 0.15419915318489075, "learning_rate": 0.0002688396544575271, "loss": 0.0391, "step": 362},
    {"epoch": 0.64, "grad_norm": 0.2486819475889206, "learning_rate": 0.0002686688013546131, "loss": 0.1099, "step": 363},
    {"epoch": 0.64, "grad_norm": 0.12814386188983917, "learning_rate": 0.0002684975357399099, "loss": 0.0437, "step": 364},
    {"epoch": 0.65, "grad_norm": 0.09224840253591537, "learning_rate": 0.00026832585820876407, "loss": 0.0487, "step": 365},
    {"epoch": 0.65, "grad_norm": 0.16062654554843903, "learning_rate": 0.00026815376935795444, "loss": 0.0549, "step": 366},
    {"epoch": 0.65, "grad_norm": 0.267597496509552, "learning_rate": 0.0002679812697856894, "loss": 0.117, "step": 367},
    {"epoch": 0.65, "grad_norm": 0.23974187672138214, "learning_rate": 0.0002678083600916051, "loss": 0.0536, "step": 368},
    {"epoch": 0.65, "grad_norm": 0.1853066086769104, "learning_rate": 0.0002676350408767634, "loss": 0.0392, "step": 369},
    {"epoch": 0.65, "grad_norm": 0.4043184220790863, "learning_rate": 0.00026746131274364975, "loss": 0.1079, "step": 370},
    {"epoch": 0.66, "grad_norm": 0.19574593007564545, "learning_rate": 0.0002672871762961709, "loss": 0.0866, "step": 371},
    {"epoch": 0.66, "grad_norm": 0.34985387325286865, "learning_rate": 0.00026711263213965314, "loss": 0.1326, "step": 372},
    {"epoch": 0.66, "grad_norm": 0.3007662892341614, "learning_rate": 0.0002669376808808399, "loss": 0.0793, "step": 373},
    {"epoch": 0.66, "grad_norm": 0.2934662103652954, "learning_rate": 0.00026676232312788993, "loss": 0.1095, "step": 374},
    {"epoch": 0.66, "grad_norm": 0.19983640313148499, "learning_rate": 0.0002665865594903748, "loss": 0.063, "step": 375},
    {"epoch": 0.67, "grad_norm": 0.20034237205982208, "learning_rate": 0.0002664103905792772, "loss": 0.0927, "step": 376},
    {"epoch": 0.67, "grad_norm": 0.24908727407455444, "learning_rate": 0.0002662338170069884, "loss": 0.0869, "step": 377},
    {"epoch": 0.67, "grad_norm": 0.14556318521499634, "learning_rate": 0.0002660568393873066, "loss": 0.0679, "step": 378},
    {"epoch": 0.67, "grad_norm": 0.14582981169223785, "learning_rate": 0.0002658794583354343, "loss": 0.062, "step": 379},
    {"epoch": 0.67, "grad_norm": 0.21219995617866516, "learning_rate": 0.00026570167446797656, "loss": 0.1065, "step": 380},
    {"epoch": 0.67, "grad_norm": 0.1637437492609024, "learning_rate": 0.0002655234884029385, "loss": 0.0754, "step": 381},
    {"epoch": 0.68, "grad_norm": 0.3341504633426666, "learning_rate": 0.00026534490075972363, "loss": 0.1185, "step": 382},
    {"epoch": 0.68, "grad_norm": 0.28448009490966797, "learning_rate": 0.00026516591215913115, "loss": 0.1244, "step": 383},
    {"epoch": 0.68, "grad_norm": 0.10381971299648285, "learning_rate": 0.0002649865232233541, "loss": 0.0538, "step": 384},
    {"epoch": 0.68, "grad_norm": 0.30114489793777466, "learning_rate": 0.00026480673457597733, "loss": 0.1083, "step": 385},
    {"epoch": 0.68, "grad_norm": 0.25989478826522827, "learning_rate": 0.00026462654684197487, "loss": 0.123, "step": 386},
    {"epoch": 0.68, "grad_norm": 0.14556318521499634, "learning_rate": 0.00026444596064770833, "loss": 0.121, "step": 387},
    {"epoch": 0.69, "grad_norm": 0.11710379272699356, "learning_rate": 0.0002642649766209242, "loss": 0.0585, "step": 388},
    {"epoch": 0.69, "grad_norm": 0.16899935901165009, "learning_rate": 0.000264083595390752, "loss": 0.0905, "step": 389},
    {"epoch": 0.69, "grad_norm": 0.14508718252182007, "learning_rate": 0.00026390181758770205, "loss": 0.0807, "step": 390},
    {"epoch": 0.69, "grad_norm": 0.3146648705005646, "learning_rate": 0.000263719643843663, "loss": 0.1388, "step": 391},
    {"epoch": 0.69, "grad_norm": 0.18293187022209167, "learning_rate": 0.0002635370747919002, "loss": 0.094, "step": 392},
    {"epoch": 0.7, "grad_norm": 0.1427004188299179, "learning_rate": 0.0002633541110670528, "loss": 0.0749, "step": 393},
    {"epoch": 0.7, "grad_norm": 0.2598584294319153, "learning_rate": 0.0002631707533051321, "loss": 0.1237, "step": 394},
    {"epoch": 0.7, "grad_norm": 0.19870835542678833, "learning_rate": 0.0002629870021435192, "loss": 0.0817, "step": 395},
    {"epoch": 0.7, "grad_norm": 0.2234540730714798, "learning_rate": 0.00026280285822096247, "loss": 0.1058, "step": 396},
    {"epoch": 0.7, "grad_norm": 0.1785740852355957, "learning_rate": 0.0002626183221775758, "loss": 0.0676, "step": 397},
    {"epoch": 0.7, "grad_norm": 0.11512486636638641, "learning_rate": 0.000262433394654836, "loss": 0.0893, "step": 398},
    {"epoch": 0.71, "grad_norm": 0.35373592376708984, "learning_rate": 0.00026224807629558094, "loss": 0.1077, "step": 399},
    {"epoch": 0.71, "grad_norm": 0.2722702920436859,
|
"learning_rate": 0.0002620623677440068, |
|
"loss": 0.0925, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.4532068073749542, |
|
"learning_rate": 0.0002618762696456664, |
|
"loss": 0.1217, |
|
"step": 401 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.19945968687534332, |
|
"learning_rate": 0.0002616897826474666, |
|
"loss": 0.0899, |
|
"step": 402 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.25592172145843506, |
|
"learning_rate": 0.00026150290739766606, |
|
"loss": 0.0519, |
|
"step": 403 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"grad_norm": 0.16839353740215302, |
|
"learning_rate": 0.00026131564454587314, |
|
"loss": 0.0814, |
|
"step": 404 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.28912433981895447, |
|
"learning_rate": 0.0002611279947430436, |
|
"loss": 0.0737, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.053034551441669464, |
|
"learning_rate": 0.0002609399586414782, |
|
"loss": 0.0157, |
|
"step": 406 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.34488940238952637, |
|
"learning_rate": 0.0002607515368948206, |
|
"loss": 0.0951, |
|
"step": 407 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.3413633704185486, |
|
"learning_rate": 0.00026056273015805494, |
|
"loss": 0.0712, |
|
"step": 408 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"grad_norm": 0.4603371024131775, |
|
"learning_rate": 0.0002603735390875039, |
|
"loss": 0.0946, |
|
"step": 409 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.243332177400589, |
|
"learning_rate": 0.0002601839643408259, |
|
"loss": 0.041, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.27589449286460876, |
|
"learning_rate": 0.0002599940065770131, |
|
"loss": 0.0841, |
|
"step": 411 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.3786303997039795, |
|
"learning_rate": 0.0002598036664563893, |
|
"loss": 0.1031, |
|
"step": 412 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.23370252549648285, |
|
"learning_rate": 0.00025961294464060716, |
|
"loss": 0.0509, |
|
"step": 413 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.3785135746002197, |
|
"learning_rate": 0.00025942184179264635, |
|
"loss": 0.1116, |
|
"step": 414 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"grad_norm": 0.3081932067871094, |
|
"learning_rate": 0.0002592303585768111, |
|
"loss": 0.08, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.13592147827148438, |
|
"learning_rate": 0.00025903849565872767, |
|
"loss": 0.0269, |
|
"step": 416 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.29880547523498535, |
|
"learning_rate": 0.0002588462537053424, |
|
"loss": 0.0596, |
|
"step": 417 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.11771093308925629, |
|
"learning_rate": 0.00025865363338491913, |
|
"loss": 0.0549, |
|
"step": 418 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2312057763338089, |
|
"learning_rate": 0.00025846063536703705, |
|
"loss": 0.0744, |
|
"step": 419 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.33669716119766235, |
|
"learning_rate": 0.00025826726032258815, |
|
"loss": 0.059, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"grad_norm": 0.2426741123199463, |
|
"learning_rate": 0.00025807350892377513, |
|
"loss": 0.0996, |
|
"step": 421 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.4019070863723755, |
|
"learning_rate": 0.000257879381844109, |
|
"loss": 0.0864, |
|
"step": 422 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.09328150004148483, |
|
"learning_rate": 0.00025768487975840653, |
|
"loss": 0.0328, |
|
"step": 423 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.40782174468040466, |
|
"learning_rate": 0.00025749000334278826, |
|
"loss": 0.0446, |
|
"step": 424 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.21026232838630676, |
|
"learning_rate": 0.00025729475327467574, |
|
"loss": 0.096, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"grad_norm": 0.3340080678462982, |
|
"learning_rate": 0.00025709913023278967, |
|
"loss": 0.1243, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_loss": 0.07237789034843445, |
|
"eval_runtime": 14.7609, |
|
"eval_samples_per_second": 32.315, |
|
"eval_steps_per_second": 8.13, |
|
"step": 426 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.2728588879108429, |
|
"learning_rate": 0.00025690313489714706, |
|
"loss": 0.1162, |
|
"step": 427 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.22674015164375305, |
|
"learning_rate": 0.00025670676794905915, |
|
"loss": 0.0822, |
|
"step": 428 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.13643956184387207, |
|
"learning_rate": 0.0002565100300711289, |
|
"loss": 0.0652, |
|
"step": 429 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.1212250292301178, |
|
"learning_rate": 0.0002563129219472488, |
|
"loss": 0.0515, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.15632915496826172, |
|
"learning_rate": 0.0002561154442625983, |
|
"loss": 0.105, |
|
"step": 431 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"grad_norm": 0.14310358464717865, |
|
"learning_rate": 0.00025591759770364145, |
|
"loss": 0.078, |
|
"step": 432 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.1461591273546219, |
|
"learning_rate": 0.00025571938295812475, |
|
"loss": 0.0626, |
|
"step": 433 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.12564843893051147, |
|
"learning_rate": 0.00025552080071507423, |
|
"loss": 0.1015, |
|
"step": 434 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.18360354006290436, |
|
"learning_rate": 0.00025532185166479384, |
|
"loss": 0.125, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.26957467198371887, |
|
"learning_rate": 0.00025512253649886236, |
|
"loss": 0.1049, |
|
"step": 436 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.18892253935337067, |
|
"learning_rate": 0.00025492285591013116, |
|
"loss": 0.0907, |
|
"step": 437 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"grad_norm": 0.25057581067085266, |
|
"learning_rate": 0.0002547228105927221, |
|
"loss": 0.1081, |
|
"step": 438 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.13890071213245392, |
|
"learning_rate": 0.00025452240124202477, |
|
"loss": 0.0865, |
|
"step": 439 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.17899222671985626, |
|
"learning_rate": 0.0002543216285546942, |
|
"loss": 0.0437, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.26809147000312805, |
|
"learning_rate": 0.00025412049322864845, |
|
"loss": 0.0507, |
|
"step": 441 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.19062833487987518, |
|
"learning_rate": 0.0002539189959630662, |
|
"loss": 0.0516, |
|
"step": 442 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"grad_norm": 0.23480018973350525, |
|
"learning_rate": 0.0002537171374583843, |
|
"loss": 0.0672, |
|
"step": 443 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.10694094747304916, |
|
"learning_rate": 0.0002535149184162952, |
|
"loss": 0.0248, |
|
"step": 444 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.2778875231742859, |
|
"learning_rate": 0.0002533123395397448, |
|
"loss": 0.1295, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.30790090560913086, |
|
"learning_rate": 0.00025310940153292974, |
|
"loss": 0.0518, |
|
"step": 446 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.2887485921382904, |
|
"learning_rate": 0.00025290610510129513, |
|
"loss": 0.0739, |
|
"step": 447 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.3820766508579254, |
|
"learning_rate": 0.00025270245095153197, |
|
"loss": 0.0873, |
|
"step": 448 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"grad_norm": 0.09947656840085983, |
|
"learning_rate": 0.00025249843979157467, |
|
"loss": 0.0137, |
|
"step": 449 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.2997414469718933, |
|
"learning_rate": 0.00025229407233059883, |
|
"loss": 0.1314, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.24817639589309692, |
|
"learning_rate": 0.00025208934927901857, |
|
"loss": 0.1275, |
|
"step": 451 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.28992751240730286, |
|
"learning_rate": 0.0002518842713484839, |
|
"loss": 0.1294, |
|
"step": 452 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.20057415962219238, |
|
"learning_rate": 0.00025167883925187874, |
|
"loss": 0.0559, |
|
"step": 453 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.18815775215625763, |
|
"learning_rate": 0.000251473053703318, |
|
"loss": 0.0922, |
|
"step": 454 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 0.2501707375049591, |
|
"learning_rate": 0.00025126691541814514, |
|
"loss": 0.0712, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.19835379719734192, |
|
"learning_rate": 0.00025106042511293005, |
|
"loss": 0.0974, |
|
"step": 456 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.1476050615310669, |
|
"learning_rate": 0.0002508535835054661, |
|
"loss": 0.1005, |
|
"step": 457 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.18511542677879333, |
|
"learning_rate": 0.0002506463913147679, |
|
"loss": 0.1343, |
|
"step": 458 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.1092979907989502, |
|
"learning_rate": 0.0002504388492610687, |
|
"loss": 0.0728, |
|
"step": 459 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"grad_norm": 0.11496451497077942, |
|
"learning_rate": 0.00025023095806581797, |
|
"loss": 0.0949, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.10526807606220245, |
|
"learning_rate": 0.0002500227184516789, |
|
"loss": 0.0874, |
|
"step": 461 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.12002858519554138, |
|
"learning_rate": 0.0002498141311425258, |
|
"loss": 0.0597, |
|
"step": 462 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.18872810900211334, |
|
"learning_rate": 0.00024960519686344164, |
|
"loss": 0.0938, |
|
"step": 463 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.15304633975028992, |
|
"learning_rate": 0.0002493959163407154, |
|
"loss": 0.0942, |
|
"step": 464 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.13661184906959534, |
|
"learning_rate": 0.0002491862903018398, |
|
"loss": 0.104, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"grad_norm": 0.12348087877035141, |
|
"learning_rate": 0.00024897631947550853, |
|
"loss": 0.0914, |
|
"step": 466 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.17801643908023834, |
|
"learning_rate": 0.00024876600459161396, |
|
"loss": 0.1041, |
|
"step": 467 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.16505847871303558, |
|
"learning_rate": 0.00024855534638124424, |
|
"loss": 0.0657, |
|
"step": 468 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.17176857590675354, |
|
"learning_rate": 0.0002483443455766812, |
|
"loss": 0.0809, |
|
"step": 469 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.20494180917739868, |
|
"learning_rate": 0.0002481330029113975, |
|
"loss": 0.0939, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"grad_norm": 0.08561734110116959, |
|
"learning_rate": 0.000247921319120054, |
|
"loss": 0.0316, |
|
"step": 471 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.17116233706474304, |
|
"learning_rate": 0.0002477092949384977, |
|
"loss": 0.0593, |
|
"step": 472 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.19205418229103088, |
|
"learning_rate": 0.00024749693110375854, |
|
"loss": 0.0684, |
|
"step": 473 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.1300123929977417, |
|
"learning_rate": 0.0002472842283540473, |
|
"loss": 0.0581, |
|
"step": 474 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.24078501760959625, |
|
"learning_rate": 0.0002470711874287529, |
|
"loss": 0.0483, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.3071228861808777, |
|
"learning_rate": 0.00024685780906843975, |
|
"loss": 0.0961, |
|
"step": 476 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"grad_norm": 0.24373668432235718, |
|
"learning_rate": 0.0002466440940148452, |
|
"loss": 0.1139, |
|
"step": 477 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.22678449749946594, |
|
"learning_rate": 0.00024643004301087715, |
|
"loss": 0.0324, |
|
"step": 478 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.34472745656967163, |
|
"learning_rate": 0.00024621565680061117, |
|
"loss": 0.1041, |
|
"step": 479 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.16587162017822266, |
|
"learning_rate": 0.0002460009361292881, |
|
"loss": 0.0992, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.42953774333000183, |
|
"learning_rate": 0.0002457858817433115, |
|
"loss": 0.1284, |
|
"step": 481 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.6054782867431641, |
|
"learning_rate": 0.00024557049439024486, |
|
"loss": 0.1034, |
|
"step": 482 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"grad_norm": 0.207829549908638, |
|
"learning_rate": 0.0002453547748188092, |
|
"loss": 0.0928, |
|
"step": 483 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.36941850185394287, |
|
"learning_rate": 0.00024513872377888036, |
|
"loss": 0.0765, |
|
"step": 484 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.20771794021129608, |
|
"learning_rate": 0.0002449223420214864, |
|
"loss": 0.1001, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.16417552530765533, |
|
"learning_rate": 0.00024470563029880497, |
|
"loss": 0.0783, |
|
"step": 486 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.12799693644046783, |
|
"learning_rate": 0.0002444885893641609, |
|
"loss": 0.0753, |
|
"step": 487 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"grad_norm": 0.21585993468761444, |
|
"learning_rate": 0.00024427121997202313, |
|
"loss": 0.1241, |
|
"step": 488 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.1481488198041916, |
|
"learning_rate": 0.00024405352287800266, |
|
"loss": 0.1086, |
|
"step": 489 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.18564724922180176, |
|
"learning_rate": 0.00024383549883884949, |
|
"loss": 0.0801, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.16308890283107758, |
|
"learning_rate": 0.00024361714861245015, |
|
"loss": 0.0884, |
|
"step": 491 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.10771831125020981, |
|
"learning_rate": 0.00024339847295782503, |
|
"loss": 0.0507, |
|
"step": 492 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.11360620707273483, |
|
"learning_rate": 0.00024317947263512578, |
|
"loss": 0.0541, |
|
"step": 493 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"grad_norm": 0.17256328463554382, |
|
"learning_rate": 0.00024296014840563264, |
|
"loss": 0.0797, |
|
"step": 494 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.14872509241104126, |
|
"learning_rate": 0.00024274050103175192, |
|
"loss": 0.098, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.10749954730272293, |
|
"learning_rate": 0.00024252053127701297, |
|
"loss": 0.0629, |
|
"step": 496 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.22221991419792175, |
|
"learning_rate": 0.00024230023990606608, |
|
"loss": 0.0737, |
|
"step": 497 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.1851089596748352, |
|
"learning_rate": 0.00024207962768467927, |
|
"loss": 0.0961, |
|
"step": 498 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.1685558706521988, |
|
"learning_rate": 0.0002418586953797361, |
|
"loss": 0.0863, |
|
"step": 499 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"grad_norm": 0.14281867444515228, |
|
"learning_rate": 0.00024163744375923268, |
|
"loss": 0.0334, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.24673086404800415, |
|
"learning_rate": 0.00024141587359227513, |
|
"loss": 0.1468, |
|
"step": 501 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.21355701982975006, |
|
"learning_rate": 0.00024119398564907685, |
|
"loss": 0.1145, |
|
"step": 502 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.16007691621780396, |
|
"learning_rate": 0.00024097178070095598, |
|
"loss": 0.0799, |
|
"step": 503 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.3183180093765259, |
|
"learning_rate": 0.0002407492595203326, |
|
"loss": 0.1405, |
|
"step": 504 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"grad_norm": 0.08811590075492859, |
|
"learning_rate": 0.0002405264228807259, |
|
"loss": 0.0359, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.1502489596605301, |
|
"learning_rate": 0.0002403032715567519, |
|
"loss": 0.0763, |
|
"step": 506 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.07442978769540787, |
|
"learning_rate": 0.00024007980632412032, |
|
"loss": 0.0365, |
|
"step": 507 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.2458060383796692, |
|
"learning_rate": 0.00023985602795963227, |
|
"loss": 0.0724, |
|
"step": 508 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.1589912623167038, |
|
"learning_rate": 0.0002396319372411771, |
|
"loss": 0.0701, |
|
"step": 509 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.1447596549987793, |
|
"learning_rate": 0.00023940753494773018, |
|
"loss": 0.0411, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.9, |
|
"grad_norm": 0.20220863819122314, |
|
"learning_rate": 0.00023918282185934984, |
|
"loss": 0.082, |
|
"step": 511 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.08220729231834412, |
|
"learning_rate": 0.00023895779875717483, |
|
"loss": 0.0423, |
|
"step": 512 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.18194489181041718, |
|
"learning_rate": 0.0002387324664234216, |
|
"loss": 0.058, |
|
"step": 513 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.2533247470855713, |
|
"learning_rate": 0.00023850682564138142, |
|
"loss": 0.08, |
|
"step": 514 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.4217647910118103, |
|
"learning_rate": 0.00023828087719541784, |
|
"loss": 0.1557, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.1498800665140152, |
|
"learning_rate": 0.00023805462187096398, |
|
"loss": 0.0298, |
|
"step": 516 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"grad_norm": 0.1469077467918396, |
|
"learning_rate": 0.0002378280604545196, |
|
"loss": 0.0392, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.23052054643630981, |
|
"learning_rate": 0.0002376011937336485, |
|
"loss": 0.1275, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.2608712315559387, |
|
"learning_rate": 0.0002373740224969758, |
|
"loss": 0.0984, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.3406033515930176, |
|
"learning_rate": 0.00023714654753418518, |
|
"loss": 0.1668, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.1383407562971115, |
|
"learning_rate": 0.0002369187696360161, |
|
"loss": 0.0319, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 0.92, |
|
"grad_norm": 0.2986023724079132, |
|
"learning_rate": 0.00023669068959426105, |
|
"loss": 0.0797, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.18390725553035736, |
|
"learning_rate": 0.00023646230820176289, |
|
"loss": 0.0514, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.3618876338005066, |
|
"learning_rate": 0.00023623362625241188, |
|
"loss": 0.1465, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.23905062675476074, |
|
"learning_rate": 0.00023600464454114325, |
|
"loss": 0.0669, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.24474327266216278, |
|
"learning_rate": 0.00023577536386393415, |
|
"loss": 0.1048, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.11233604699373245, |
|
"learning_rate": 0.000235545785017801, |
|
"loss": 0.0653, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"grad_norm": 0.12456522136926651, |
|
"learning_rate": 0.0002353159088007966, |
|
"loss": 0.0855, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.14804989099502563, |
|
"learning_rate": 0.00023508573601200764, |
|
"loss": 0.0554, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.1283787339925766, |
|
"learning_rate": 0.00023485526745155167, |
|
"loss": 0.0644, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.16255222260951996, |
|
"learning_rate": 0.00023462450392057436, |
|
"loss": 0.1088, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.18897709250450134, |
|
"learning_rate": 0.0002343934462212467, |
|
"loss": 0.0673, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.1460375338792801, |
|
"learning_rate": 0.00023416209515676235, |
|
"loss": 0.0923, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 0.94, |
|
"grad_norm": 0.13687273859977722, |
|
"learning_rate": 0.0002339304515313348, |
|
"loss": 0.1142, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.11255405843257904, |
|
"learning_rate": 0.00023369851615019432, |
|
"loss": 0.0513, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.18131427466869354, |
|
"learning_rate": 0.0002334662898195856, |
|
"loss": 0.1058, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.19648568332195282, |
|
"learning_rate": 0.0002332337733467646, |
|
"loss": 0.0721, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.1530655175447464, |
|
"learning_rate": 0.00023300096753999582, |
|
"loss": 0.0698, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 0.95, |
|
"grad_norm": 0.20368973910808563, |
|
"learning_rate": 0.00023276787320854965, |
|
"loss": 0.0925, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.14580555260181427, |
|
"learning_rate": 0.00023253449116269936, |
|
"loss": 0.052, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.20287400484085083, |
|
"learning_rate": 0.00023230082221371832, |
|
"loss": 0.0714, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.2907051742076874, |
|
"learning_rate": 0.00023206686717387737, |
|
"loss": 0.1166, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.3000693619251251, |
|
"learning_rate": 0.00023183262685644174, |
|
"loss": 0.1326, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.0760418102145195, |
|
"learning_rate": 0.0002315981020756683, |
|
"loss": 0.0591, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 0.144688680768013, |
|
"learning_rate": 0.00023136329364680284, |
|
"loss": 0.074, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.14553490281105042, |
|
"learning_rate": 0.00023112820238607712, |
|
"loss": 0.0609, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.13767151534557343, |
|
"learning_rate": 0.0002308928291107061, |
|
"loss": 0.0694, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.1333126723766327, |
|
"learning_rate": 0.00023065717463888503, |
|
"loss": 0.0509, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.1710643619298935, |
|
"learning_rate": 0.00023042123978978663, |
|
"loss": 0.0957, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.14117231965065002, |
|
"learning_rate": 0.00023018502538355823, |
|
"loss": 0.0372, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.97, |
|
"grad_norm": 0.24988646805286407, |
|
"learning_rate": 0.0002299485322413191, |
|
"loss": 0.0711, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.225083127617836, |
|
"learning_rate": 0.00022971176118515731, |
|
"loss": 0.0913, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.2095450758934021, |
|
"learning_rate": 0.00022947471303812704, |
|
"loss": 0.0485, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.2737296521663666, |
|
"learning_rate": 0.0002292373886242456, |
|
"loss": 0.0643, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.15709654986858368, |
|
"learning_rate": 0.0002289997887684908, |
|
"loss": 0.0327, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.98, |
|
"grad_norm": 0.17161165177822113, |
|
"learning_rate": 0.00022876191429679785, |
|
"loss": 0.0356, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.4428029954433441, |
|
"learning_rate": 0.00022852376603605656, |
|
"loss": 0.0976, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.3834541440010071, |
|
"learning_rate": 0.00022828534481410845, |
|
"loss": 0.1103, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.4154812693595886, |
|
"learning_rate": 0.00022804665145974396, |
|
"loss": 0.1864, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.2642601430416107, |
|
"learning_rate": 0.0002278076868026995, |
|
"loss": 0.0944, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.166983962059021, |
|
"learning_rate": 0.0002275684516736545, |
|
"loss": 0.0799, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 0.99, |
|
"grad_norm": 0.1374656707048416, |
|
"learning_rate": 0.0002273289469042287, |
|
"loss": 0.0509, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.12492585182189941, |
|
"learning_rate": 0.00022708917332697905, |
|
"loss": 0.0491, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.0952799916267395, |
|
"learning_rate": 0.000226849131775397, |
|
"loss": 0.0561, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"grad_norm": 0.16703763604164124, |
|
"learning_rate": 0.00022660882308390544, |
|
"loss": 0.0882, |
|
"step": 565 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 1695, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 3, |
|
"save_steps": 565, |
|
"total_flos": 5.169945694856806e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|