{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.496,
  "eval_steps": 156,
  "global_step": 312,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {"epoch": 0.01, "learning_rate": 0.0001, "loss": 2.3516, "step": 1},
    {"epoch": 0.02, "learning_rate": 0.0002, "loss": 2.5189, "step": 2},
    {"epoch": 0.02, "learning_rate": 0.0001994638069705094, "loss": 2.3061, "step": 3},
    {"epoch": 0.03, "learning_rate": 0.00019892761394101877, "loss": 2.1868, "step": 4},
    {"epoch": 0.04, "learning_rate": 0.00019839142091152817, "loss": 2.2999, "step": 5},
    {"epoch": 0.05, "learning_rate": 0.00019785522788203755, "loss": 2.1234, "step": 6},
    {"epoch": 0.06, "learning_rate": 0.00019731903485254693, "loss": 1.9583, "step": 7},
    {"epoch": 0.06, "learning_rate": 0.0001967828418230563, "loss": 1.9467, "step": 8},
    {"epoch": 0.07, "learning_rate": 0.00019624664879356568, "loss": 0.9932, "step": 9},
    {"epoch": 0.08, "learning_rate": 0.00019571045576407506, "loss": 1.8878, "step": 10},
    {"epoch": 0.09, "learning_rate": 0.00019517426273458447, "loss": 1.919, "step": 11},
    {"epoch": 0.1, "learning_rate": 0.00019463806970509384, "loss": 1.8031, "step": 12},
    {"epoch": 0.1, "learning_rate": 0.00019410187667560322, "loss": 2.1202, "step": 13},
    {"epoch": 0.11, "learning_rate": 0.0001935656836461126, "loss": 1.9934, "step": 14},
    {"epoch": 0.12, "learning_rate": 0.00019302949061662198, "loss": 1.8699, "step": 15},
    {"epoch": 0.13, "learning_rate": 0.00019249329758713138, "loss": 2.1202, "step": 16},
    {"epoch": 0.14, "learning_rate": 0.00019195710455764076, "loss": 1.83, "step": 17},
    {"epoch": 0.14, "learning_rate": 0.00019142091152815014, "loss": 1.7661, "step": 18},
    {"epoch": 0.15, "learning_rate": 0.00019088471849865952, "loss": 1.5565, "step": 19},
    {"epoch": 0.16, "learning_rate": 0.0001903485254691689, "loss": 2.0463, "step": 20},
    {"epoch": 0.17, "learning_rate": 0.00018981233243967828, "loss": 1.9589, "step": 21},
    {"epoch": 0.18, "learning_rate": 0.00018927613941018768, "loss": 1.6222, "step": 22},
    {"epoch": 0.18, "learning_rate": 0.00018873994638069706, "loss": 1.6992, "step": 23},
    {"epoch": 0.19, "learning_rate": 0.00018820375335120644, "loss": 1.8366, "step": 24},
    {"epoch": 0.2, "learning_rate": 0.00018766756032171581, "loss": 1.9899, "step": 25},
    {"epoch": 0.21, "learning_rate": 0.0001871313672922252, "loss": 1.6855, "step": 26},
    {"epoch": 0.22, "learning_rate": 0.00018659517426273457, "loss": 1.9672, "step": 27},
    {"epoch": 0.22, "learning_rate": 0.00018605898123324398, "loss": 1.7919, "step": 28},
    {"epoch": 0.23, "learning_rate": 0.00018552278820375335, "loss": 1.6418, "step": 29},
    {"epoch": 0.24, "learning_rate": 0.00018498659517426273, "loss": 1.7358, "step": 30},
    {"epoch": 0.25, "learning_rate": 0.0001844504021447721, "loss": 1.6264, "step": 31},
    {"epoch": 0.26, "learning_rate": 0.0001839142091152815, "loss": 1.4462, "step": 32},
    {"epoch": 0.26, "learning_rate": 0.0001833780160857909, "loss": 1.885, "step": 33},
    {"epoch": 0.27, "learning_rate": 0.0001828418230563003, "loss": 1.5104, "step": 34},
    {"epoch": 0.28, "learning_rate": 0.00018230563002680968, "loss": 1.7443, "step": 35},
    {"epoch": 0.29, "learning_rate": 0.00018176943699731906, "loss": 1.4314, "step": 36},
    {"epoch": 0.3, "learning_rate": 0.00018123324396782843, "loss": 1.8016, "step": 37},
    {"epoch": 0.3, "learning_rate": 0.0001806970509383378, "loss": 0.688, "step": 38},
    {"epoch": 0.31, "learning_rate": 0.0001801608579088472, "loss": 1.7045, "step": 39},
    {"epoch": 0.32, "learning_rate": 0.0001796246648793566, "loss": 1.6214, "step": 40},
    {"epoch": 0.33, "learning_rate": 0.00017908847184986597, "loss": 1.6041, "step": 41},
    {"epoch": 0.34, "learning_rate": 0.00017855227882037535, "loss": 1.7231, "step": 42},
    {"epoch": 0.34, "learning_rate": 0.00017801608579088473, "loss": 1.6115, "step": 43},
    {"epoch": 0.35, "learning_rate": 0.0001774798927613941, "loss": 1.737, "step": 44},
    {"epoch": 0.36, "learning_rate": 0.0001769436997319035, "loss": 1.6943, "step": 45},
    {"epoch": 0.37, "learning_rate": 0.0001764075067024129, "loss": 1.8335, "step": 46},
    {"epoch": 0.38, "learning_rate": 0.00017587131367292227, "loss": 0.7486, "step": 47},
    {"epoch": 0.38, "learning_rate": 0.00017533512064343165, "loss": 1.7578, "step": 48},
    {"epoch": 0.39, "learning_rate": 0.00017479892761394103, "loss": 1.3638, "step": 49},
    {"epoch": 0.4, "learning_rate": 0.0001742627345844504, "loss": 1.5721, "step": 50},
    {"epoch": 0.41, "learning_rate": 0.0001737265415549598, "loss": 1.7746, "step": 51},
    {"epoch": 0.42, "learning_rate": 0.0001731903485254692, "loss": 1.4914, "step": 52},
    {"epoch": 0.42, "learning_rate": 0.00017265415549597856, "loss": 1.3864, "step": 53},
    {"epoch": 0.43, "learning_rate": 0.00017211796246648794, "loss": 1.4492, "step": 54},
    {"epoch": 0.44, "learning_rate": 0.00017158176943699732, "loss": 1.7473, "step": 55},
    {"epoch": 0.45, "learning_rate": 0.00017104557640750673, "loss": 1.8054, "step": 56},
    {"epoch": 0.46, "learning_rate": 0.0001705093833780161, "loss": 1.5355, "step": 57},
    {"epoch": 0.46, "learning_rate": 0.00016997319034852548, "loss": 0.6863, "step": 58},
    {"epoch": 0.47, "learning_rate": 0.00016943699731903486, "loss": 1.8431, "step": 59},
    {"epoch": 0.48, "learning_rate": 0.00016890080428954424, "loss": 1.9419, "step": 60},
    {"epoch": 0.49, "learning_rate": 0.00016836461126005362, "loss": 1.5432, "step": 61},
    {"epoch": 0.5, "learning_rate": 0.00016782841823056302, "loss": 1.5008, "step": 62},
    {"epoch": 0.5, "learning_rate": 0.0001672922252010724, "loss": 1.6897, "step": 63},
    {"epoch": 0.51, "learning_rate": 0.00016675603217158178, "loss": 1.4961, "step": 64},
    {"epoch": 0.52, "learning_rate": 0.00016621983914209116, "loss": 0.9804, "step": 65},
    {"epoch": 0.53, "learning_rate": 0.00016568364611260053, "loss": 1.7138, "step": 66},
    {"epoch": 0.54, "learning_rate": 0.00016514745308310994, "loss": 2.0109, "step": 67},
    {"epoch": 0.54, "learning_rate": 0.00016461126005361932, "loss": 1.6207, "step": 68},
    {"epoch": 0.55, "learning_rate": 0.0001640750670241287, "loss": 1.93, "step": 69},
    {"epoch": 0.56, "learning_rate": 0.00016353887399463807, "loss": 1.5649, "step": 70},
    {"epoch": 0.57, "learning_rate": 0.00016300268096514745, "loss": 1.6152, "step": 71},
    {"epoch": 0.58, "learning_rate": 0.00016246648793565683, "loss": 1.9486, "step": 72},
    {"epoch": 0.58, "learning_rate": 0.00016193029490616624, "loss": 1.8235, "step": 73},
    {"epoch": 0.59, "learning_rate": 0.00016139410187667561, "loss": 1.8366, "step": 74},
    {"epoch": 0.6, "learning_rate": 0.000160857908847185, "loss": 1.8362, "step": 75},
    {"epoch": 0.61, "learning_rate": 0.00016032171581769437, "loss": 1.5134, "step": 76},
    {"epoch": 0.62, "learning_rate": 0.00015978552278820375, "loss": 1.8573, "step": 77},
    {"epoch": 0.62, "learning_rate": 0.00015924932975871313, "loss": 2.0278, "step": 78},
    {"epoch": 0.63, "learning_rate": 0.00015871313672922253, "loss": 1.7282, "step": 79},
    {"epoch": 0.64, "learning_rate": 0.0001581769436997319, "loss": 1.7742, "step": 80},
    {"epoch": 0.65, "learning_rate": 0.0001576407506702413, "loss": 1.3045, "step": 81},
    {"epoch": 0.66, "learning_rate": 0.00015710455764075067, "loss": 1.7186, "step": 82},
    {"epoch": 0.66, "learning_rate": 0.00015656836461126004, "loss": 1.4031, "step": 83},
    {"epoch": 0.67, "learning_rate": 0.00015603217158176945, "loss": 1.778, "step": 84},
    {"epoch": 0.68, "learning_rate": 0.00015549597855227883, "loss": 1.5504, "step": 85},
    {"epoch": 0.69, "learning_rate": 0.0001549597855227882, "loss": 1.8263, "step": 86},
    {"epoch": 0.7, "learning_rate": 0.00015442359249329758, "loss": 1.7688, "step": 87},
    {"epoch": 0.7, "learning_rate": 0.00015388739946380696, "loss": 1.8441, "step": 88},
    {"epoch": 0.71, "learning_rate": 0.00015335120643431634, "loss": 1.7728, "step": 89},
    {"epoch": 0.72, "learning_rate": 0.00015281501340482574, "loss": 0.7108, "step": 90},
    {"epoch": 0.73, "learning_rate": 0.00015227882037533512, "loss": 1.9269, "step": 91},
    {"epoch": 0.74, "learning_rate": 0.0001517426273458445, "loss": 1.654, "step": 92},
    {"epoch": 0.74, "learning_rate": 0.00015120643431635388, "loss": 0.9516, "step": 93},
    {"epoch": 0.75, "learning_rate": 0.00015067024128686326, "loss": 1.9344, "step": 94},
    {"epoch": 0.76, "learning_rate": 0.00015013404825737266, "loss": 1.5555, "step": 95},
    {"epoch": 0.77, "learning_rate": 0.00014959785522788207, "loss": 1.4938, "step": 96},
    {"epoch": 0.78, "learning_rate": 0.00014906166219839145, "loss": 1.8661, "step": 97},
    {"epoch": 0.78, "learning_rate": 0.00014852546916890082, "loss": 1.6192, "step": 98},
    {"epoch": 0.79, "learning_rate": 0.0001479892761394102, "loss": 1.4516, "step": 99},
    {"epoch": 0.8, "learning_rate": 0.00014745308310991958, "loss": 1.605, "step": 100},
    {"epoch": 0.81, "learning_rate": 0.00014691689008042896, "loss": 1.3411, "step": 101},
    {"epoch": 0.82, "learning_rate": 0.00014638069705093836, "loss": 1.4481, "step": 102},
    {"epoch": 0.82, "learning_rate": 0.00014584450402144774, "loss": 1.7751, "step": 103},
    {"epoch": 0.83, "learning_rate": 0.00014530831099195712, "loss": 1.5242, "step": 104},
    {"epoch": 0.84, "learning_rate": 0.0001447721179624665, "loss": 1.3182, "step": 105},
    {"epoch": 0.85, "learning_rate": 0.00014423592493297588, "loss": 0.6642, "step": 106},
    {"epoch": 0.86, "learning_rate": 0.00014369973190348528, "loss": 1.7414, "step": 107},
    {"epoch": 0.86, "learning_rate": 0.00014316353887399466, "loss": 1.8329, "step": 108},
    {"epoch": 0.87, "learning_rate": 0.00014262734584450404, "loss": 1.5711, "step": 109},
    {"epoch": 0.88, "learning_rate": 0.00014209115281501342, "loss": 1.3522, "step": 110},
    {"epoch": 0.89, "learning_rate": 0.0001415549597855228, "loss": 1.6554, "step": 111},
    {"epoch": 0.9, "learning_rate": 0.00014101876675603217, "loss": 1.6257, "step": 112},
    {"epoch": 0.9, "learning_rate": 0.00014048257372654158, "loss": 1.5689, "step": 113},
    {"epoch": 0.91, "learning_rate": 0.00013994638069705096, "loss": 1.6837, "step": 114},
    {"epoch": 0.92, "learning_rate": 0.00013941018766756033, "loss": 1.7821, "step": 115},
    {"epoch": 0.93, "learning_rate": 0.0001388739946380697, "loss": 1.2241, "step": 116},
    {"epoch": 0.94, "learning_rate": 0.0001383378016085791, "loss": 1.4491, "step": 117},
    {"epoch": 0.94, "learning_rate": 0.0001378016085790885, "loss": 1.7487, "step": 118},
    {"epoch": 0.95, "learning_rate": 0.00013726541554959787, "loss": 1.6278, "step": 119},
    {"epoch": 0.96, "learning_rate": 0.00013672922252010725, "loss": 1.4211, "step": 120},
    {"epoch": 0.97, "learning_rate": 0.00013619302949061663, "loss": 1.7479, "step": 121},
    {"epoch": 0.98, "learning_rate": 0.000135656836461126, "loss": 1.8424, "step": 122},
    {"epoch": 0.98, "learning_rate": 0.00013512064343163539, "loss": 1.646, "step": 123},
    {"epoch": 0.99, "learning_rate": 0.0001345844504021448, "loss": 0.6342, "step": 124},
    {"epoch": 1.0, "learning_rate": 0.00013404825737265417, "loss": 1.5995, "step": 125},
    {"epoch": 1.01, "learning_rate": 0.00013351206434316355, "loss": 1.3449, "step": 126},
    {"epoch": 1.02, "learning_rate": 0.00013297587131367293, "loss": 1.481, "step": 127},
    {"epoch": 1.02, "learning_rate": 0.0001324396782841823, "loss": 1.6196, "step": 128},
    {"epoch": 1.03, "learning_rate": 0.00013190348525469168, "loss": 1.9561, "step": 129},
    {"epoch": 1.04, "learning_rate": 0.0001313672922252011, "loss": 1.6187, "step": 130},
    {"epoch": 1.05, "learning_rate": 0.00013083109919571046, "loss": 1.5728, "step": 131},
    {"epoch": 1.06, "learning_rate": 0.00013029490616621984, "loss": 1.5527, "step": 132},
    {"epoch": 1.06, "learning_rate": 0.00012975871313672922, "loss": 1.5292, "step": 133},
    {"epoch": 1.07, "learning_rate": 0.0001292225201072386, "loss": 0.8898, "step": 134},
    {"epoch": 1.08, "learning_rate": 0.000128686327077748, "loss": 1.5361, "step": 135},
    {"epoch": 1.09, "learning_rate": 0.00012815013404825738, "loss": 1.5545, "step": 136},
    {"epoch": 1.1, "learning_rate": 0.00012761394101876676, "loss": 0.8376, "step": 137},
    {"epoch": 1.1, "learning_rate": 0.00012707774798927614, "loss": 1.8237, "step": 138},
    {"epoch": 1.11, "learning_rate": 0.00012654155495978552, "loss": 0.6911, "step": 139},
    {"epoch": 1.12, "learning_rate": 0.0001260053619302949, "loss": 1.5879, "step": 140},
    {"epoch": 1.13, "learning_rate": 0.0001254691689008043, "loss": 1.4548, "step": 141},
    {"epoch": 1.14, "learning_rate": 0.00012493297587131368, "loss": 1.8997, "step": 142},
    {"epoch": 1.14, "learning_rate": 0.00012439678284182306, "loss": 1.0416, "step": 143},
    {"epoch": 1.15, "learning_rate": 0.00012386058981233243, "loss": 1.3932, "step": 144},
    {"epoch": 1.16, "learning_rate": 0.0001233243967828418, "loss": 1.3957, "step": 145},
    {"epoch": 1.17, "learning_rate": 0.00012278820375335122, "loss": 1.6802, "step": 146},
    {"epoch": 1.18, "learning_rate": 0.0001222520107238606, "loss": 1.6183, "step": 147},
    {"epoch": 1.18, "learning_rate": 0.00012171581769436997, "loss": 1.413, "step": 148},
    {"epoch": 1.19, "learning_rate": 0.00012117962466487935, "loss": 1.6361, "step": 149},
    {"epoch": 1.2, "learning_rate": 0.00012064343163538874, "loss": 1.3911, "step": 150},
    {"epoch": 1.21, "learning_rate": 0.00012010723860589812, "loss": 1.7734, "step": 151},
    {"epoch": 1.22, "learning_rate": 0.0001195710455764075, "loss": 1.4433, "step": 152},
    {"epoch": 1.22, "learning_rate": 0.00011903485254691689, "loss": 1.2472, "step": 153},
    {"epoch": 1.23, "learning_rate": 0.00011849865951742627, "loss": 1.643, "step": 154},
    {"epoch": 1.24, "learning_rate": 0.00011796246648793565, "loss": 1.3825, "step": 155},
    {"epoch": 1.25, "learning_rate": 0.00011742627345844504, "loss": 1.4606, "step": 156},
    {"epoch": 1.25, "eval_loss": 1.6580854654312134, "eval_runtime": 423.9126, "eval_samples_per_second": 2.359, "eval_steps_per_second": 0.149, "step": 156},
    {"epoch": 1.26, "learning_rate": 0.00011689008042895442, "loss": 1.4304, "step": 157},
    {"epoch": 1.26, "learning_rate": 0.00011635388739946382, "loss": 1.4293, "step": 158},
    {"epoch": 1.27, "learning_rate": 0.0001158176943699732, "loss": 1.5875, "step": 159},
    {"epoch": 1.28, "learning_rate": 0.00011528150134048259, "loss": 1.5828, "step": 160},
    {"epoch": 1.29, "learning_rate": 0.00011474530831099197, "loss": 1.6481, "step": 161},
    {"epoch": 1.3, "learning_rate": 0.00011420911528150135, "loss": 1.8307, "step": 162},
    {"epoch": 1.3, "learning_rate": 0.00011367292225201074, "loss": 1.6085, "step": 163},
    {"epoch": 1.31, "learning_rate": 0.00011313672922252012, "loss": 1.5519, "step": 164},
    {"epoch": 1.32, "learning_rate": 0.00011260053619302951, "loss": 0.593, "step": 165},
    {"epoch": 1.33, "learning_rate": 0.00011206434316353889, "loss": 1.5822, "step": 166},
    {"epoch": 1.34, "learning_rate": 0.00011152815013404827, "loss": 1.6974, "step": 167},
    {"epoch": 1.34, "learning_rate": 0.00011099195710455766, "loss": 1.7165, "step": 168},
    {"epoch": 1.35, "learning_rate": 0.00011045576407506704, "loss": 1.431, "step": 169},
    {"epoch": 1.36, "learning_rate": 0.00010991957104557641, "loss": 1.6756, "step": 170},
    {"epoch": 1.37, "learning_rate": 0.0001093833780160858, "loss": 1.7251, "step": 171},
    {"epoch": 1.38, "learning_rate": 0.00010884718498659518, "loss": 1.5319, "step": 172},
    {"epoch": 1.38, "learning_rate": 0.00010831099195710456, "loss": 1.4441, "step": 173},
    {"epoch": 1.39, "learning_rate": 0.00010777479892761395, "loss": 1.4129, "step": 174},
    {"epoch": 1.4, "learning_rate": 0.00010723860589812333, "loss": 1.4783, "step": 175},
    {"epoch": 1.41, "learning_rate": 0.00010670241286863272, "loss": 1.3038, "step": 176},
    {"epoch": 1.42, "learning_rate": 0.0001061662198391421, "loss": 1.6594, "step": 177},
    {"epoch": 1.42, "learning_rate": 0.00010563002680965148, "loss": 1.7443, "step": 178},
    {"epoch": 1.43, "learning_rate": 0.00010509383378016087, "loss": 1.3424, "step": 179},
    {"epoch": 1.44, "learning_rate": 0.00010455764075067025, "loss": 1.4521, "step": 180},
    {"epoch": 1.45, "learning_rate": 0.00010402144772117963, "loss": 1.7432, "step": 181},
    {"epoch": 1.46, "learning_rate": 0.00010348525469168902, "loss": 1.3248, "step": 182},
    {"epoch": 1.46, "learning_rate": 0.0001029490616621984, "loss": 1.2616, "step": 183},
    {"epoch": 1.47, "learning_rate": 0.00010241286863270778, "loss": 1.7809, "step": 184},
    {"epoch": 1.48, "learning_rate": 0.00010187667560321717, "loss": 1.6069, "step": 185},
    {"epoch": 1.49, "learning_rate": 0.00010134048257372655, "loss": 0.7396, "step": 186},
    {"epoch": 1.5, "learning_rate": 0.00010080428954423592, "loss": 1.5907, "step": 187},
    {"epoch": 1.5, "learning_rate": 0.00010026809651474532, "loss": 0.9739, "step": 188},
    {"epoch": 1.51, "learning_rate": 9.97319034852547e-05, "loss": 1.4911, "step": 189},
    {"epoch": 1.52, "learning_rate": 9.919571045576409e-05, "loss": 1.5637, "step": 190},
    {"epoch": 1.53, "learning_rate": 9.865951742627346e-05, "loss": 1.6711, "step": 191},
    {"epoch": 1.54, "learning_rate": 9.812332439678284e-05, "loss": 1.401, "step": 192},
    {"epoch": 1.54, "learning_rate": 9.758713136729223e-05, "loss": 1.7772, "step": 193},
    {"epoch": 1.55, "learning_rate": 9.705093833780161e-05, "loss": 1.6001, "step": 194},
    {"epoch": 1.56, "learning_rate": 9.651474530831099e-05, "loss": 1.2741, "step": 195},
    {"epoch": 1.57, "learning_rate": 9.597855227882038e-05, "loss": 1.4815, "step": 196},
    {"epoch": 1.58, "learning_rate": 9.544235924932976e-05, "loss": 1.8058, "step": 197},
    {"epoch": 1.58, "learning_rate": 9.490616621983914e-05, "loss": 1.9049, "step": 198},
    {"epoch": 1.59, "learning_rate": 9.436997319034853e-05, "loss": 1.8092, "step": 199},
    {"epoch": 1.6, "learning_rate": 9.383378016085791e-05, "loss": 1.7539, "step": 200},
    {"epoch": 1.61, "learning_rate": 9.329758713136729e-05, "loss": 1.4465, "step": 201},
    {"epoch": 1.62, "learning_rate": 9.276139410187668e-05, "loss": 1.6284, "step": 202},
    {"epoch": 1.62, "learning_rate": 9.222520107238606e-05, "loss": 1.3143, "step": 203},
    {"epoch": 1.63, "learning_rate": 9.168900804289545e-05, "loss": 1.3138, "step": 204},
    {"epoch": 1.64, "learning_rate": 9.115281501340484e-05, "loss": 1.567, "step": 205},
    {"epoch": 1.65, "learning_rate": 9.061662198391422e-05, "loss": 1.6019, "step": 206},
    {"epoch": 1.66, "learning_rate": 9.00804289544236e-05, "loss": 0.8417, "step": 207},
    {"epoch": 1.66, "learning_rate": 8.954423592493299e-05, "loss": 1.4479, "step": 208},
    {"epoch": 1.67, "learning_rate": 8.900804289544236e-05, "loss": 1.6918, "step": 209},
    {"epoch": 1.68, "learning_rate": 8.847184986595176e-05, "loss": 1.5157, "step": 210},
    {"epoch": 1.69, "learning_rate": 8.793565683646113e-05, "loss": 1.3928, "step": 211},
    {"epoch": 1.7, "learning_rate": 8.739946380697051e-05, "loss": 1.3409, "step": 212},
    {"epoch": 1.7, "learning_rate": 8.68632707774799e-05, "loss": 1.8313, "step": 213},
    {"epoch": 1.71, "learning_rate": 8.632707774798928e-05, "loss": 1.5135, "step": 214},
    {"epoch": 1.72, "learning_rate": 8.579088471849866e-05, "loss": 1.4986, "step": 215},
    {"epoch": 1.73, "learning_rate": 8.525469168900805e-05, "loss": 1.694, "step": 216},
    {"epoch": 1.74, "learning_rate": 8.471849865951743e-05, "loss": 1.631, "step": 217},
    {"epoch": 1.74, "learning_rate": 8.418230563002681e-05, "loss": 1.5293, "step": 218},
    {"epoch": 1.75, "learning_rate": 8.36461126005362e-05, "loss": 1.5331, "step": 219},
    {"epoch": 1.76, "learning_rate": 8.310991957104558e-05, "loss": 1.4885, "step": 220},
    {"epoch": 1.77, "learning_rate": 8.257372654155497e-05, "loss": 1.6987, "step": 221},
    {"epoch": 1.78, "learning_rate": 8.203753351206435e-05, "loss": 1.7083, "step": 222},
    {"epoch": 1.78, "learning_rate": 8.150134048257373e-05, "loss": 1.4216, "step": 223},
    {"epoch": 1.79, "learning_rate": 8.096514745308312e-05, "loss": 1.7213, "step": 224},
    {"epoch": 1.8, "learning_rate": 8.04289544235925e-05, "loss": 1.4201, "step": 225},
    {"epoch": 1.81, "learning_rate": 7.989276139410187e-05, "loss": 1.5495, "step": 226},
    {"epoch": 1.82, "learning_rate": 7.935656836461127e-05, "loss": 1.3996, "step": 227},
    {"epoch": 1.82, "learning_rate": 7.882037533512064e-05, "loss": 1.5279, "step": 228},
    {"epoch": 1.83, "learning_rate": 7.828418230563002e-05, "loss": 1.5279, "step": 229},
    {"epoch": 1.84, "learning_rate": 7.774798927613941e-05, "loss": 1.5074, "step": 230},
    {"epoch": 1.85, "learning_rate": 7.721179624664879e-05, "loss": 1.4917, "step": 231},
    {"epoch": 1.86, "learning_rate": 7.667560321715817e-05, "loss": 1.4386, "step": 232},
    {"epoch": 1.86, "learning_rate": 7.613941018766756e-05, "loss": 0.9551, "step": 233},
    {"epoch": 1.87, "learning_rate": 7.560321715817694e-05, "loss": 1.6109, "step": 234},
    {"epoch": 1.88, "learning_rate": 7.506702412868633e-05, "loss": 1.7103, "step": 235},
    {"epoch": 1.89, "learning_rate": 7.453083109919572e-05, "loss": 1.4283, "step": 236},
    {"epoch": 1.9, "learning_rate": 7.39946380697051e-05, "loss": 1.6014, "step": 237},
    {"epoch": 1.9, "learning_rate": 7.345844504021448e-05, "loss": 1.66, "step": 238},
    {"epoch": 1.91, "learning_rate": 7.292225201072387e-05, "loss": 1.6689, "step": 239},
    {"epoch": 1.92, "learning_rate": 7.238605898123325e-05, "loss": 1.6957, "step": 240},
    {"epoch": 1.93, "learning_rate": 7.184986595174264e-05, "loss": 1.5148, "step": 241},
    {"epoch": 1.94, "learning_rate": 7.131367292225202e-05, "loss": 1.3177, "step": 242},
    {"epoch": 1.94, "learning_rate": 7.07774798927614e-05, "loss": 1.5214, "step": 243},
    {"epoch": 1.95, "learning_rate": 7.024128686327079e-05, "loss": 1.5639, "step": 244},
    {"epoch": 1.96, "learning_rate": 6.970509383378017e-05, "loss": 1.2199, "step": 245},
    {"epoch": 1.97, "learning_rate": 6.916890080428954e-05, "loss": 1.626, "step": 246},
    {"epoch": 1.98, "learning_rate": 6.863270777479894e-05, "loss": 1.2711, "step": 247},
    {"epoch": 1.98, "learning_rate": 6.809651474530831e-05, "loss": 1.4732, "step": 248},
    {"epoch": 1.99, "learning_rate": 6.756032171581769e-05, "loss": 1.6072, "step": 249},
    {"epoch": 2.0, "learning_rate": 6.702412868632708e-05, "loss": 1.7638, "step": 250},
    {"epoch": 2.01, "learning_rate": 6.648793565683646e-05, "loss": 1.3744, "step": 251},
    {"epoch": 2.02, "learning_rate": 6.595174262734584e-05, "loss": 0.8892, "step": 252},
    {"epoch": 2.02, "learning_rate": 6.541554959785523e-05, "loss": 1.3256, "step": 253},
    {"epoch": 2.03, "learning_rate": 6.487935656836461e-05, "loss": 1.3321, "step": 254},
    {"epoch": 2.04, "learning_rate": 6.4343163538874e-05, "loss": 0.5158, "step": 255},
    {"epoch": 2.05, "learning_rate": 6.380697050938338e-05, "loss": 1.2251, "step": 256},
    {"epoch": 2.06, "learning_rate": 6.327077747989276e-05, "loss": 1.3238, "step": 257},
    {"epoch": 2.06, "learning_rate": 6.273458445040215e-05, "loss": 1.4272, "step": 258},
    {"epoch": 2.07, "learning_rate": 6.219839142091153e-05, "loss": 1.6344, "step": 259},
    {"epoch": 2.08, "learning_rate": 6.16621983914209e-05, "loss": 1.3657, "step": 260},
    {"epoch": 2.09, "learning_rate": 6.11260053619303e-05, "loss": 1.238, "step": 261},
    {"epoch": 2.1, "learning_rate": 6.0589812332439676e-05, "loss": 1.3978, "step": 262},
    {"epoch": 2.1, "learning_rate": 6.005361930294906e-05, "loss": 0.984, "step": 263},
    {"epoch": 2.11, "learning_rate": 5.9517426273458446e-05, "loss": 1.2924, "step": 264},
    {"epoch": 2.12, "learning_rate": 5.8981233243967824e-05, "loss": 1.3396, "step": 265},
    {"epoch": 2.13, "learning_rate": 5.844504021447721e-05, "loss": 1.2016, "step": 266},
    {"epoch": 2.14, "learning_rate": 5.79088471849866e-05, "loss": 1.5317, "step": 267},
    {"epoch": 2.14, "learning_rate": 5.7372654155495985e-05, "loss": 1.4103, "step": 268},
    {"epoch": 2.15, "learning_rate": 5.683646112600537e-05, "loss": 1.5183, "step": 269},
    {"epoch": 2.16, "learning_rate": 5.6300268096514755e-05, "loss": 1.4417, "step": 270},
    {"epoch": 2.17, "learning_rate": 5.5764075067024133e-05, "loss": 1.1283, "step": 271},
    {"epoch": 2.18, "learning_rate": 5.522788203753352e-05, "loss": 1.3834, "step": 272},
    {"epoch": 2.18, "learning_rate": 5.46916890080429e-05, "loss": 1.2201, "step": 273},
    {"epoch": 2.19, "learning_rate": 5.415549597855228e-05, "loss": 1.4145, "step": 274},
    {"epoch": 2.2, "learning_rate": 5.3619302949061666e-05, "loss": 1.6475, "step": 275},
    {"epoch": 2.21, "learning_rate": 5.308310991957105e-05, "loss": 1.3251, "step": 276},
    {"epoch": 2.22, "learning_rate": 5.2546916890080436e-05, "loss": 1.343, "step": 277},
    {"epoch": 2.22, "learning_rate": 5.2010723860589814e-05, "loss": 1.374, "step": 278},
    {"epoch": 2.23, "learning_rate": 5.14745308310992e-05, "loss": 1.1307, "step": 279},
    {"epoch": 2.24, "learning_rate": 5.0938337801608584e-05, "loss": 1.4826, "step": 280},
    {"epoch": 2.25, "learning_rate": 5.040214477211796e-05, "loss": 1.3937, "step": 281},
    {"epoch": 2.26, "learning_rate": 4.986595174262735e-05, "loss": 1.4125, "step": 282},
    {"epoch": 2.26, "learning_rate": 4.932975871313673e-05, "loss": 1.2325, "step": 283},
    {"epoch": 2.27, "learning_rate": 4.879356568364612e-05, "loss": 1.4383, "step": 284},
    {"epoch": 2.28, "learning_rate": 4.8257372654155495e-05, "loss": 1.1983, "step": 285},
    {"epoch": 2.29, "learning_rate": 4.772117962466488e-05, "loss": 1.3654, "step": 286},
    {"epoch": 2.3, "learning_rate": 4.7184986595174265e-05, "loss": 1.5061, "step": 287},
    {"epoch": 2.3, "learning_rate": 4.664879356568364e-05, "loss": 1.537, "step": 288},
    {"epoch": 2.31, "learning_rate": 4.611260053619303e-05, "loss": 1.3194, "step": 289},
    {"epoch": 2.32, "learning_rate": 4.557640750670242e-05, "loss": 1.2664, "step": 290},
    {"epoch": 2.33, "learning_rate": 4.50402144772118e-05, "loss": 1.2734, "step": 291},
    {"epoch": 2.34, "learning_rate": 4.450402144772118e-05, "loss": 1.5102, "step": 292},
    {"epoch": 2.34, "learning_rate": 4.396782841823057e-05, "loss": 1.2942, "step": 293},
    {"epoch": 2.35, "learning_rate": 4.343163538873995e-05, "loss": 1.3261, "step": 294},
    {"epoch": 2.36, "learning_rate": 4.289544235924933e-05, "loss": 1.1335, "step": 295},
    {"epoch": 2.37, "learning_rate": 4.2359249329758715e-05, "loss": 0.6077, "step": 296},
    {"epoch": 2.38, "learning_rate": 4.18230563002681e-05, "loss": 1.1896, "step": 297},
    {"epoch": 2.38, "learning_rate": 4.1286863270777485e-05, "loss": 1.5104, "step": 298},
    {"epoch": 2.39, "learning_rate": 4.075067024128686e-05, "loss": 1.3868, "step": 299},
    {"epoch": 2.4, "learning_rate": 4.021447721179625e-05, "loss": 1.531, "step": 300},
    {"epoch": 2.41, "learning_rate": 3.967828418230563e-05, "loss": 1.5279, "step": 301},
    {"epoch": 2.42, "learning_rate": 3.914209115281501e-05, "loss": 1.4537, "step": 302},
    {"epoch": 2.42, "learning_rate": 3.8605898123324396e-05, "loss": 1.5357, "step": 303},
    {"epoch": 2.43, "learning_rate": 3.806970509383378e-05, "loss": 1.503, "step": 304},
    {"epoch": 2.44, "learning_rate": 3.7533512064343166e-05, "loss": 1.4574, "step": 305},
    {"epoch": 2.45, "learning_rate": 3.699731903485255e-05, "loss": 0.5042, "step": 306},
    {"epoch": 2.46, "learning_rate": 3.6461126005361935e-05, "loss": 1.6059, "step": 307},
    {"epoch": 2.46, "learning_rate": 3.592493297587132e-05, "loss": 1.2128, "step": 308},
    {"epoch": 2.47, "learning_rate": 3.53887399463807e-05, "loss": 1.1628, "step": 309},
    {"epoch": 2.48, "learning_rate": 3.485254691689008e-05, "loss": 1.5253, "step": 310},
    {"epoch": 2.49, "learning_rate": 3.431635388739947e-05, "loss": 1.3093, "step": 311},
    {"epoch": 2.5, "learning_rate": 3.3780160857908846e-05, "loss": 1.2058, "step": 312},
    {"epoch": 2.5, "eval_loss": 1.7549619674682617, "eval_runtime": 455.1372, "eval_samples_per_second": 2.197, "eval_steps_per_second": 0.138, "step": 312}
  ],
  "logging_steps": 1,
  "max_steps": 375,
  "num_train_epochs": 3,
  "save_steps": 156,
  "total_flos": 1.161251446849536e+17,
  "trial_name": null,
  "trial_params": null
}