{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9611650485436893,
  "eval_steps": 31,
  "global_step": 618,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.0, "learning_rate": 0.0, "loss": 4.59, "step": 1 },
    { "epoch": 0.0, "eval_loss": 4.240383148193359, "eval_runtime": 7.311, "eval_samples_per_second": 164.273, "eval_steps_per_second": 54.849, "step": 1 },
    { "epoch": 0.01, "learning_rate": 2.0000000000000003e-06, "loss": 4.252, "step": 2 },
    { "epoch": 0.01, "learning_rate": 4.000000000000001e-06, "loss": 4.2054, "step": 3 },
    { "epoch": 0.02, "learning_rate": 6e-06, "loss": 4.1624, "step": 4 },
    { "epoch": 0.02, "learning_rate": 8.000000000000001e-06, "loss": 3.9787, "step": 5 },
    { "epoch": 0.03, "learning_rate": 1e-05, "loss": 3.7979, "step": 6 },
    { "epoch": 0.03, "learning_rate": 1.2e-05, "loss": 3.8982, "step": 7 },
    { "epoch": 0.04, "learning_rate": 1.4000000000000001e-05, "loss": 3.805, "step": 8 },
    { "epoch": 0.04, "learning_rate": 1.6000000000000003e-05, "loss": 3.7176, "step": 9 },
    { "epoch": 0.05, "learning_rate": 1.8e-05, "loss": 3.4755, "step": 10 },
    { "epoch": 0.05, "learning_rate": 2e-05, "loss": 3.6401, "step": 11 },
    { "epoch": 0.06, "learning_rate": 2.2000000000000003e-05, "loss": 3.5615, "step": 12 },
    { "epoch": 0.06, "learning_rate": 2.4e-05, "loss": 3.5286, "step": 13 },
    { "epoch": 0.07, "learning_rate": 2.6000000000000002e-05, "loss": 3.5437, "step": 14 },
    { "epoch": 0.07, "learning_rate": 2.8000000000000003e-05, "loss": 3.5163, "step": 15 },
    { "epoch": 0.08, "learning_rate": 3e-05, "loss": 3.4108, "step": 16 },
    { "epoch": 0.08, "learning_rate": 3.2000000000000005e-05, "loss": 3.3637, "step": 17 },
    { "epoch": 0.09, "learning_rate": 3.4000000000000007e-05, "loss": 3.3538, "step": 18 },
    { "epoch": 0.09, "learning_rate": 3.6e-05, "loss": 3.3819, "step": 19 },
    { "epoch": 0.1, "learning_rate": 3.8e-05, "loss": 3.2511, "step": 20 },
    { "epoch": 0.1, "learning_rate": 4e-05, "loss": 3.3211, "step": 21 },
    { "epoch": 0.11, "learning_rate": 4.2e-05, "loss": 3.2764, "step": 22 },
    { "epoch": 0.11, "learning_rate": 4.4000000000000006e-05, "loss": 3.0653, "step": 23 },
    { "epoch": 0.12, "learning_rate": 4.600000000000001e-05, "loss": 3.0859, "step": 24 },
    { "epoch": 0.12, "learning_rate": 4.8e-05, "loss": 3.0804, "step": 25 },
    { "epoch": 0.13, "learning_rate": 5e-05, "loss": 2.9774, "step": 26 },
    { "epoch": 0.13, "learning_rate": 5.2000000000000004e-05, "loss": 2.9269, "step": 27 },
    { "epoch": 0.14, "learning_rate": 5.4000000000000005e-05, "loss": 3.0926, "step": 28 },
    { "epoch": 0.14, "learning_rate": 5.6000000000000006e-05, "loss": 2.9725, "step": 29 },
    { "epoch": 0.15, "learning_rate": 5.8e-05, "loss": 3.0293, "step": 30 },
    { "epoch": 0.15, "learning_rate": 6e-05, "loss": 3.0903, "step": 31 },
    { "epoch": 0.15, "eval_loss": 2.9962990283966064, "eval_runtime": 7.3671, "eval_samples_per_second": 163.022, "eval_steps_per_second": 54.431, "step": 31 },
    { "epoch": 0.16, "learning_rate": 6.2e-05, "loss": 2.9903, "step": 32 },
    { "epoch": 0.16, "learning_rate": 6.400000000000001e-05, "loss": 3.0196, "step": 33 },
    { "epoch": 0.17, "learning_rate": 6.6e-05, "loss": 3.0288, "step": 34 },
    { "epoch": 0.17, "learning_rate": 6.800000000000001e-05, "loss": 3.0071, "step": 35 },
    { "epoch": 0.17, "learning_rate": 7e-05, "loss": 3.0393, "step": 36 },
    { "epoch": 0.18, "learning_rate": 7.2e-05, "loss": 2.9937, "step": 37 },
    { "epoch": 0.18, "learning_rate": 7.4e-05, "loss": 2.9988, "step": 38 },
    { "epoch": 0.19, "learning_rate": 7.6e-05, "loss": 2.9331, "step": 39 },
    { "epoch": 0.19, "learning_rate": 7.800000000000001e-05, "loss": 3.0414, "step": 40 },
    { "epoch": 0.2, "learning_rate": 8e-05, "loss": 3.0237, "step": 41 },
    { "epoch": 0.2, "learning_rate": 8.2e-05, "loss": 2.9664, "step": 42 },
    { "epoch": 0.21, "learning_rate": 8.4e-05, "loss": 2.8639, "step": 43 },
    { "epoch": 0.21, "learning_rate": 8.6e-05, "loss": 2.8562, "step": 44 },
    { "epoch": 0.22, "learning_rate": 8.800000000000001e-05, "loss": 2.9632, "step": 45 },
    { "epoch": 0.22, "learning_rate": 9e-05, "loss": 2.946, "step": 46 },
    { "epoch": 0.23, "learning_rate": 9.200000000000001e-05, "loss": 2.8428, "step": 47 },
    { "epoch": 0.23, "learning_rate": 9.4e-05, "loss": 2.9827, "step": 48 },
    { "epoch": 0.24, "learning_rate": 9.6e-05, "loss": 2.9512, "step": 49 },
    { "epoch": 0.24, "learning_rate": 9.8e-05, "loss": 2.8997, "step": 50 },
    { "epoch": 0.25, "learning_rate": 0.0001, "loss": 2.9762, "step": 51 },
    { "epoch": 0.25, "learning_rate": 0.00010200000000000001, "loss": 3.0429, "step": 52 },
    { "epoch": 0.26, "learning_rate": 0.00010400000000000001, "loss": 3.0223, "step": 53 },
    { "epoch": 0.26, "learning_rate": 0.00010600000000000002, "loss": 3.0007, "step": 54 },
    { "epoch": 0.27, "learning_rate": 0.00010800000000000001, "loss": 3.0436, "step": 55 },
    { "epoch": 0.27, "learning_rate": 0.00011000000000000002, "loss": 3.0151, "step": 56 },
    { "epoch": 0.28, "learning_rate": 0.00011200000000000001, "loss": 2.9909, "step": 57 },
    { "epoch": 0.28, "learning_rate": 0.00011399999999999999, "loss": 2.9942, "step": 58 },
    { "epoch": 0.29, "learning_rate": 0.000116, "loss": 3.0098, "step": 59 },
    { "epoch": 0.29, "learning_rate": 0.000118, "loss": 3.0353, "step": 60 },
    { "epoch": 0.3, "learning_rate": 0.00012, "loss": 3.0671, "step": 61 },
    { "epoch": 0.3, "learning_rate": 0.000122, "loss": 2.9824, "step": 62 },
    { "epoch": 0.3, "eval_loss": 3.022158145904541, "eval_runtime": 7.3702, "eval_samples_per_second": 162.953, "eval_steps_per_second": 54.408, "step": 62 },
    { "epoch": 0.31, "learning_rate": 0.000124, "loss": 3.0207, "step": 63 },
    { "epoch": 0.31, "learning_rate": 0.000126, "loss": 2.9048, "step": 64 },
    { "epoch": 0.32, "learning_rate": 0.00012800000000000002, "loss": 3.0518, "step": 65 },
    { "epoch": 0.32, "learning_rate": 0.00013000000000000002, "loss": 3.0854, "step": 66 },
    { "epoch": 0.33, "learning_rate": 0.000132, "loss": 3.0317, "step": 67 },
    { "epoch": 0.33, "learning_rate": 0.000134, "loss": 3.0313, "step": 68 },
    { "epoch": 0.33, "learning_rate": 0.00013600000000000003, "loss": 3.0753, "step": 69 },
    { "epoch": 0.34, "learning_rate": 0.000138, "loss": 2.9999, "step": 70 },
    { "epoch": 0.34, "learning_rate": 0.00014, "loss": 3.0423, "step": 71 },
    { "epoch": 0.35, "learning_rate": 0.000142, "loss": 2.9642, "step": 72 },
    { "epoch": 0.35, "learning_rate": 0.000144, "loss": 2.9575, "step": 73 },
    { "epoch": 0.36, "learning_rate": 0.000146, "loss": 2.9854, "step": 74 },
    { "epoch": 0.36, "learning_rate": 0.000148, "loss": 2.9729, "step": 75 },
    { "epoch": 0.37, "learning_rate": 0.00015000000000000001, "loss": 2.9176, "step": 76 },
    { "epoch": 0.37, "learning_rate": 0.000152, "loss": 2.947, "step": 77 },
    { "epoch": 0.38, "learning_rate": 0.000154, "loss": 3.0542, "step": 78 },
    { "epoch": 0.38, "learning_rate": 0.00015600000000000002, "loss": 3.0718, "step": 79 },
    { "epoch": 0.39, "learning_rate": 0.00015800000000000002, "loss": 3.027, "step": 80 },
    { "epoch": 0.39, "learning_rate": 0.00016, "loss": 3.1764, "step": 81 },
    { "epoch": 0.4, "learning_rate": 0.000162, "loss": 3.1091, "step": 82 },
    { "epoch": 0.4, "learning_rate": 0.000164, "loss": 3.0931, "step": 83 },
    { "epoch": 0.41, "learning_rate": 0.000166, "loss": 3.2712, "step": 84 },
    { "epoch": 0.41, "learning_rate": 0.000168, "loss": 3.3353, "step": 85 },
    { "epoch": 0.42, "learning_rate": 0.00017, "loss": 3.4876, "step": 86 },
    { "epoch": 0.42, "learning_rate": 0.000172, "loss": 3.3383, "step": 87 },
    { "epoch": 0.43, "learning_rate": 0.000174, "loss": 3.1497, "step": 88 },
    { "epoch": 0.43, "learning_rate": 0.00017600000000000002, "loss": 3.1029, "step": 89 },
    { "epoch": 0.44, "learning_rate": 0.00017800000000000002, "loss": 3.1484, "step": 90 },
    { "epoch": 0.44, "learning_rate": 0.00018, "loss": 3.1156, "step": 91 },
    { "epoch": 0.45, "learning_rate": 0.000182, "loss": 3.2557, "step": 92 },
    { "epoch": 0.45, "learning_rate": 0.00018400000000000003, "loss": 3.173, "step": 93 },
    { "epoch": 0.45, "eval_loss": 3.150902032852173, "eval_runtime": 7.5676, "eval_samples_per_second": 158.703, "eval_steps_per_second": 52.989, "step": 93 },
    { "epoch": 0.46, "learning_rate": 0.00018600000000000002, "loss": 3.128, "step": 94 },
    { "epoch": 0.46, "learning_rate": 0.000188, "loss": 3.146, "step": 95 },
    { "epoch": 0.47, "learning_rate": 0.00019, "loss": 3.194, "step": 96 },
    { "epoch": 0.47, "learning_rate": 0.000192, "loss": 3.0987, "step": 97 },
    { "epoch": 0.48, "learning_rate": 0.000194, "loss": 3.2405, "step": 98 },
    { "epoch": 0.48, "learning_rate": 0.000196, "loss": 3.1568, "step": 99 },
    { "epoch": 0.49, "learning_rate": 0.00019800000000000002, "loss": 3.1488, "step": 100 },
    { "epoch": 0.49, "learning_rate": 0.0002, "loss": 3.2105, "step": 101 },
    { "epoch": 0.5, "learning_rate": 0.0001996138996138996, "loss": 3.2575, "step": 102 },
    { "epoch": 0.5, "learning_rate": 0.00019922779922779924, "loss": 3.1921, "step": 103 },
    { "epoch": 0.5, "learning_rate": 0.00019884169884169884, "loss": 3.2369, "step": 104 },
    { "epoch": 0.51, "learning_rate": 0.00019845559845559847, "loss": 3.1031, "step": 105 },
    { "epoch": 0.51, "learning_rate": 0.00019806949806949807, "loss": 3.2618, "step": 106 },
    { "epoch": 0.52, "learning_rate": 0.0001976833976833977, "loss": 3.2034, "step": 107 },
    { "epoch": 0.52, "learning_rate": 0.0001972972972972973, "loss": 3.2094, "step": 108 },
    { "epoch": 0.53, "learning_rate": 0.00019691119691119693, "loss": 3.1894, "step": 109 },
    { "epoch": 0.53, "learning_rate": 0.00019652509652509653, "loss": 3.1614, "step": 110 },
    { "epoch": 0.54, "learning_rate": 0.00019613899613899616, "loss": 3.176, "step": 111 },
    { "epoch": 0.54, "learning_rate": 0.00019575289575289576, "loss": 3.2153, "step": 112 },
    { "epoch": 0.55, "learning_rate": 0.0001953667953667954, "loss": 3.0923, "step": 113 },
    { "epoch": 0.55, "learning_rate": 0.000194980694980695, "loss": 3.2878, "step": 114 },
    { "epoch": 0.56, "learning_rate": 0.00019459459459459462, "loss": 3.0605, "step": 115 },
    { "epoch": 0.56, "learning_rate": 0.00019420849420849422, "loss": 3.1282, "step": 116 },
    { "epoch": 0.57, "learning_rate": 0.00019382239382239382, "loss": 3.1204, "step": 117 },
    { "epoch": 0.57, "learning_rate": 0.00019343629343629345, "loss": 3.0932, "step": 118 },
    { "epoch": 0.58, "learning_rate": 0.00019305019305019305, "loss": 3.2913, "step": 119 },
    { "epoch": 0.58, "learning_rate": 0.00019266409266409268, "loss": 3.1809, "step": 120 },
    { "epoch": 0.59, "learning_rate": 0.00019227799227799228, "loss": 3.2408, "step": 121 },
    { "epoch": 0.59, "learning_rate": 0.0001918918918918919, "loss": 3.8133, "step": 122 },
    { "epoch": 0.6, "learning_rate": 0.0001915057915057915, "loss": 3.1869, "step": 123 },
    { "epoch": 0.6, "learning_rate": 0.00019111969111969114, "loss": 3.1053, "step": 124 },
    { "epoch": 0.6, "eval_loss": 3.205463409423828, "eval_runtime": 7.3677, "eval_samples_per_second": 163.009, "eval_steps_per_second": 54.427, "step": 124 },
    { "epoch": 0.61, "learning_rate": 0.00019073359073359074, "loss": 3.1206, "step": 125 },
    { "epoch": 0.61, "learning_rate": 0.00019034749034749037, "loss": 3.1233, "step": 126 },
    { "epoch": 0.62, "learning_rate": 0.00018996138996138997, "loss": 3.0673, "step": 127 },
    { "epoch": 0.62, "learning_rate": 0.0001895752895752896, "loss": 3.1314, "step": 128 },
    { "epoch": 0.63, "learning_rate": 0.0001891891891891892, "loss": 3.1997, "step": 129 },
    { "epoch": 0.63, "learning_rate": 0.0001888030888030888, "loss": 3.1298, "step": 130 },
    { "epoch": 0.64, "learning_rate": 0.00018841698841698843, "loss": 3.1821, "step": 131 },
    { "epoch": 0.64, "learning_rate": 0.00018803088803088803, "loss": 3.2418, "step": 132 },
    { "epoch": 0.65, "learning_rate": 0.00018764478764478766, "loss": 3.1543, "step": 133 },
    { "epoch": 0.65, "learning_rate": 0.00018725868725868726, "loss": 3.2136, "step": 134 },
    { "epoch": 0.66, "learning_rate": 0.0001868725868725869, "loss": 3.3314, "step": 135 },
    { "epoch": 0.66, "learning_rate": 0.0001864864864864865, "loss": 3.2328, "step": 136 },
    { "epoch": 0.67, "learning_rate": 0.00018610038610038612, "loss": 3.2225, "step": 137 },
    { "epoch": 0.67, "learning_rate": 0.00018571428571428572, "loss": 3.1159, "step": 138 },
    { "epoch": 0.67, "learning_rate": 0.00018532818532818535, "loss": 3.0339, "step": 139 },
    { "epoch": 0.68, "learning_rate": 0.00018494208494208495, "loss": 3.2672, "step": 140 },
    { "epoch": 0.68, "learning_rate": 0.00018455598455598458, "loss": 3.1237, "step": 141 },
    { "epoch": 0.69, "learning_rate": 0.00018416988416988418, "loss": 3.1692, "step": 142 },
    { "epoch": 0.69, "learning_rate": 0.0001837837837837838, "loss": 3.3243, "step": 143 },
    { "epoch": 0.7, "learning_rate": 0.0001833976833976834, "loss": 3.0264, "step": 144 },
    { "epoch": 0.7, "learning_rate": 0.000183011583011583, "loss": 3.2045, "step": 145 },
    { "epoch": 0.71, "learning_rate": 0.00018262548262548264, "loss": 3.1966, "step": 146 },
    { "epoch": 0.71, "learning_rate": 0.00018223938223938224, "loss": 3.0587, "step": 147 },
    { "epoch": 0.72, "learning_rate": 0.00018185328185328187, "loss": 3.2979, "step": 148 },
    { "epoch": 0.72, "learning_rate": 0.00018146718146718147, "loss": 3.1549, "step": 149 },
    { "epoch": 0.73, "learning_rate": 0.0001810810810810811, "loss": 3.1682, "step": 150 },
    { "epoch": 0.73, "learning_rate": 0.0001806949806949807, "loss": 3.3214, "step": 151 },
    { "epoch": 0.74, "learning_rate": 0.00018030888030888032, "loss": 3.1783, "step": 152 },
    { "epoch": 0.74, "learning_rate": 0.00017992277992277993, "loss": 3.2268, "step": 153 },
    { "epoch": 0.75, "learning_rate": 0.00017953667953667955, "loss": 3.2843, "step": 154 },
    { "epoch": 0.75, "learning_rate": 0.00017915057915057916, "loss": 3.3124, "step": 155 },
    { "epoch": 0.75, "eval_loss": 3.1898298263549805, "eval_runtime": 7.5884, "eval_samples_per_second": 158.268, "eval_steps_per_second": 52.844, "step": 155 },
    { "epoch": 0.76, "learning_rate": 0.00017876447876447878, "loss": 3.1855, "step": 156 },
    { "epoch": 0.76, "learning_rate": 0.00017837837837837839, "loss": 3.2096, "step": 157 },
    { "epoch": 0.77, "learning_rate": 0.00017799227799227801, "loss": 3.2174, "step": 158 },
    { "epoch": 0.77, "learning_rate": 0.00017760617760617762, "loss": 3.2775, "step": 159 },
    { "epoch": 0.78, "learning_rate": 0.00017722007722007722, "loss": 3.2065, "step": 160 },
    { "epoch": 0.78, "learning_rate": 0.00017683397683397684, "loss": 3.2905, "step": 161 },
    { "epoch": 0.79, "learning_rate": 0.00017644787644787645, "loss": 3.1591, "step": 162 },
    { "epoch": 0.79, "learning_rate": 0.00017606177606177607, "loss": 3.2721, "step": 163 },
    { "epoch": 0.8, "learning_rate": 0.00017567567567567568, "loss": 3.1743, "step": 164 },
    { "epoch": 0.8, "learning_rate": 0.0001752895752895753, "loss": 3.234, "step": 165 },
    { "epoch": 0.81, "learning_rate": 0.0001749034749034749, "loss": 3.2775, "step": 166 },
    { "epoch": 0.81, "learning_rate": 0.00017451737451737453, "loss": 3.2317, "step": 167 },
    { "epoch": 0.82, "learning_rate": 0.00017413127413127413, "loss": 3.0691, "step": 168 },
    { "epoch": 0.82, "learning_rate": 0.00017374517374517376, "loss": 3.1793, "step": 169 },
    { "epoch": 0.83, "learning_rate": 0.00017335907335907336, "loss": 3.2259, "step": 170 },
    { "epoch": 0.83, "learning_rate": 0.000172972972972973, "loss": 3.1813, "step": 171 },
    { "epoch": 0.83, "learning_rate": 0.0001725868725868726, "loss": 3.2416, "step": 172 },
    { "epoch": 0.84, "learning_rate": 0.0001722007722007722, "loss": 3.2016, "step": 173 },
    { "epoch": 0.84, "learning_rate": 0.00017181467181467182, "loss": 3.1766, "step": 174 },
    { "epoch": 0.85, "learning_rate": 0.00017142857142857143, "loss": 3.0861, "step": 175 },
    { "epoch": 0.85, "learning_rate": 0.00017104247104247105, "loss": 3.2104, "step": 176 },
    { "epoch": 0.86, "learning_rate": 0.00017065637065637065, "loss": 3.273, "step": 177 },
    { "epoch": 0.86, "learning_rate": 0.00017027027027027028, "loss": 3.2371, "step": 178 },
    { "epoch": 0.87, "learning_rate": 0.00016988416988416988, "loss": 3.2654, "step": 179 },
    { "epoch": 0.87, "learning_rate": 0.0001694980694980695, "loss": 3.1812, "step": 180 },
    { "epoch": 0.88, "learning_rate": 0.00016911196911196911, "loss": 3.2781, "step": 181 },
    { "epoch": 0.88, "learning_rate": 0.00016872586872586874, "loss": 3.1611, "step": 182 },
    { "epoch": 0.89, "learning_rate": 0.00016833976833976834, "loss": 3.0902, "step": 183 },
    { "epoch": 0.89, "learning_rate": 0.00016795366795366797, "loss": 3.2414, "step": 184 },
    { "epoch": 0.9, "learning_rate": 0.00016756756756756757, "loss": 3.1472, "step": 185 },
    { "epoch": 0.9, "learning_rate": 0.0001671814671814672, "loss": 3.1761, "step": 186 },
    { "epoch": 0.9, "eval_loss": 3.1637256145477295, "eval_runtime": 7.3669, "eval_samples_per_second": 163.026, "eval_steps_per_second": 54.432, "step": 186 },
    { "epoch": 0.91, "learning_rate": 0.0001667953667953668, "loss": 3.1409, "step": 187 },
    { "epoch": 0.91, "learning_rate": 0.0001664092664092664, "loss": 3.2262, "step": 188 },
    { "epoch": 0.92, "learning_rate": 0.00016602316602316603, "loss": 3.105, "step": 189 },
    { "epoch": 0.92, "learning_rate": 0.00016563706563706563, "loss": 3.2596, "step": 190 },
    { "epoch": 0.93, "learning_rate": 0.00016525096525096526, "loss": 3.1528, "step": 191 },
    { "epoch": 0.93, "learning_rate": 0.00016486486486486486, "loss": 3.1561, "step": 192 },
    { "epoch": 0.94, "learning_rate": 0.0001644787644787645, "loss": 3.2552, "step": 193 },
    { "epoch": 0.94, "learning_rate": 0.0001640926640926641, "loss": 3.0347, "step": 194 },
    { "epoch": 0.95, "learning_rate": 0.00016370656370656372, "loss": 3.0418, "step": 195 },
    { "epoch": 0.95, "learning_rate": 0.00016332046332046332, "loss": 3.0838, "step": 196 },
    { "epoch": 0.96, "learning_rate": 0.00016293436293436295, "loss": 3.1867, "step": 197 },
    { "epoch": 0.96, "learning_rate": 0.00016254826254826255, "loss": 3.0373, "step": 198 },
    { "epoch": 0.97, "learning_rate": 0.00016216216216216218, "loss": 2.9896, "step": 199 },
    { "epoch": 0.97, "learning_rate": 0.00016177606177606178, "loss": 3.1511, "step": 200 },
    { "epoch": 0.98, "learning_rate": 0.0001613899613899614, "loss": 3.1029, "step": 201 },
    { "epoch": 0.98, "learning_rate": 0.000161003861003861, "loss": 3.2193, "step": 202 },
    { "epoch": 0.99, "learning_rate": 0.0001606177606177606, "loss": 3.2214, "step": 203 },
    { "epoch": 0.99, "learning_rate": 0.00016023166023166024, "loss": 3.1428, "step": 204 },
    { "epoch": 1.0, "learning_rate": 0.00015984555984555984, "loss": 3.1259, "step": 205 },
    { "epoch": 1.0, "learning_rate": 0.00015945945945945947, "loss": 3.2007, "step": 206 },
    { "epoch": 1.0, "learning_rate": 0.00015907335907335907, "loss": 3.1123, "step": 207 },
    { "epoch": 1.01, "learning_rate": 0.0001586872586872587, "loss": 3.3417, "step": 208 },
    { "epoch": 1.01, "learning_rate": 0.0001583011583011583, "loss": 3.089, "step": 209 },
    { "epoch": 1.02, "learning_rate": 0.00015791505791505793, "loss": 3.0972, "step": 210 },
    { "epoch": 1.0, "learning_rate": 0.00015752895752895753, "loss": 2.1341, "step": 211 },
    { "epoch": 1.01, "learning_rate": 0.00015714285714285716, "loss": 1.9415, "step": 212 },
    { "epoch": 1.01, "learning_rate": 0.00015675675675675676, "loss": 1.959, "step": 213 },
    { "epoch": 1.02, "learning_rate": 0.0001563706563706564, "loss": 1.857, "step": 214 },
    { "epoch": 1.02, "learning_rate": 0.000155984555984556, "loss": 1.8255, "step": 215 },
    { "epoch": 1.03, "learning_rate": 0.00015559845559845562, "loss": 1.6538, "step": 216 },
    { "epoch": 1.03, "learning_rate": 0.00015521235521235522, "loss": 1.9162, "step": 217 },
    { "epoch": 1.03, "eval_loss": 3.399775981903076, "eval_runtime": 7.3711, "eval_samples_per_second": 162.933, "eval_steps_per_second": 54.401, "step": 217 },
    { "epoch": 1.04, "learning_rate": 0.00015482625482625482, "loss": 1.8293, "step": 218 },
    { "epoch": 1.04, "learning_rate": 0.00015444015444015445, "loss": 1.8539, "step": 219 },
    { "epoch": 1.05, "learning_rate": 0.00015405405405405405, "loss": 1.7888, "step": 220 },
    { "epoch": 1.05, "learning_rate": 0.00015366795366795368, "loss": 1.7813, "step": 221 },
    { "epoch": 1.06, "learning_rate": 0.00015328185328185328, "loss": 1.8911, "step": 222 },
    { "epoch": 1.06, "learning_rate": 0.0001528957528957529, "loss": 1.839, "step": 223 },
    { "epoch": 1.07, "learning_rate": 0.0001525096525096525, "loss": 1.8223, "step": 224 },
    { "epoch": 1.07, "learning_rate": 0.00015212355212355214, "loss": 1.825, "step": 225 },
    { "epoch": 1.08, "learning_rate": 0.00015173745173745174, "loss": 1.7962, "step": 226 },
    { "epoch": 1.08, "learning_rate": 0.00015135135135135137, "loss": 1.8117, "step": 227 },
    { "epoch": 1.09, "learning_rate": 0.00015096525096525097, "loss": 1.9061, "step": 228 },
    { "epoch": 1.09, "learning_rate": 0.0001505791505791506, "loss": 1.8982, "step": 229 },
    { "epoch": 1.1, "learning_rate": 0.0001501930501930502, "loss": 1.9087, "step": 230 },
    { "epoch": 1.1, "learning_rate": 0.0001498069498069498, "loss": 1.9664, "step": 231 },
    { "epoch": 1.11, "learning_rate": 0.00014942084942084943, "loss": 1.7921, "step": 232 },
    { "epoch": 1.11, "learning_rate": 0.00014903474903474903, "loss": 1.9163, "step": 233 },
    { "epoch": 1.12, "learning_rate": 0.00014864864864864866, "loss": 1.8759, "step": 234 },
    { "epoch": 1.12, "learning_rate": 0.00014826254826254826, "loss": 1.9262, "step": 235 },
    { "epoch": 1.13, "learning_rate": 0.0001478764478764479, "loss": 1.9063, "step": 236 },
    { "epoch": 1.13, "learning_rate": 0.0001474903474903475, "loss": 1.8577, "step": 237 },
    { "epoch": 1.14, "learning_rate": 0.00014710424710424712, "loss": 1.7452, "step": 238 },
    { "epoch": 1.14, "learning_rate": 0.00014671814671814672, "loss": 1.9344, "step": 239 },
    { "epoch": 1.15, "learning_rate": 0.00014633204633204635, "loss": 1.7575, "step": 240 },
    { "epoch": 1.15, "learning_rate": 0.00014594594594594595, "loss": 1.7707, "step": 241 },
    { "epoch": 1.16, "learning_rate": 0.00014555984555984558, "loss": 1.8945, "step": 242 },
    { "epoch": 1.16, "learning_rate": 0.00014517374517374518, "loss": 1.8379, "step": 243 },
    { "epoch": 1.17, "learning_rate": 0.0001447876447876448, "loss": 1.9021, "step": 244 },
    { "epoch": 1.17, "learning_rate": 0.0001444015444015444, "loss": 1.844, "step": 245 },
    { "epoch": 1.17, "learning_rate": 0.000144015444015444, "loss": 1.9396, "step": 246 },
    { "epoch": 1.18, "learning_rate": 0.00014362934362934364, "loss": 2.0305, "step": 247 },
    { "epoch": 1.18, "learning_rate": 0.00014324324324324324, "loss": 1.8985, "step": 248 },
    { "epoch": 1.18, "eval_loss": 3.330674409866333, "eval_runtime": 7.364, "eval_samples_per_second": 163.091, "eval_steps_per_second": 54.454, "step": 248 },
    { "epoch": 1.19, "learning_rate": 0.00014285714285714287, "loss": 1.8457, "step": 249 },
    { "epoch": 1.19, "learning_rate": 0.00014247104247104247, "loss": 1.8213, "step": 250 },
    { "epoch": 1.2, "learning_rate": 0.0001420849420849421, "loss": 1.7586, "step": 251 },
    { "epoch": 1.2, "learning_rate": 0.0001416988416988417, "loss": 1.8669, "step": 252 },
    { "epoch": 1.21, "learning_rate": 0.00014131274131274133, "loss": 1.9476, "step": 253 },
    { "epoch": 1.21, "learning_rate": 0.00014092664092664093, "loss": 1.8525, "step": 254 },
    { "epoch": 1.22, "learning_rate": 0.00014054054054054056, "loss": 2.0163, "step": 255 },
    { "epoch": 1.22, "learning_rate": 0.00014015444015444016, "loss": 1.9186, "step": 256 },
    { "epoch": 1.23, "learning_rate": 0.00013976833976833979, "loss": 1.9528, "step": 257 },
    { "epoch": 1.23, "learning_rate": 0.0001393822393822394, "loss": 2.2483, "step": 258 },
    { "epoch": 1.24, "learning_rate": 0.00013899613899613902, "loss": 1.8889, "step": 259 },
    { "epoch": 1.24, "learning_rate": 0.00013861003861003862, "loss": 2.0137, "step": 260 },
    { "epoch": 1.25, "learning_rate": 0.00013822393822393822, "loss": 1.9397, "step": 261 },
    { "epoch": 1.25, "learning_rate": 0.00013783783783783785, "loss": 1.8241, "step": 262 },
    { "epoch": 1.26, "learning_rate": 0.00013745173745173745, "loss": 1.9685, "step": 263 },
    { "epoch": 1.26, "learning_rate": 0.00013706563706563708, "loss": 1.9909, "step": 264 },
    { "epoch": 1.27, "learning_rate": 0.00013667953667953668, "loss": 1.8653, "step": 265 },
    { "epoch": 1.27, "learning_rate": 0.0001362934362934363, "loss": 1.9163, "step": 266 },
    { "epoch": 1.28, "learning_rate": 0.0001359073359073359, "loss": 1.9765, "step": 267 },
    { "epoch": 1.28, "learning_rate": 0.00013552123552123554, "loss": 1.788, "step": 268 },
    { "epoch": 1.29, "learning_rate": 0.00013513513513513514, "loss": 1.8103, "step": 269 },
    { "epoch": 1.29, "learning_rate": 0.00013474903474903477, "loss": 2.0086, "step": 270 },
    { "epoch": 1.3, "learning_rate": 0.00013436293436293437, "loss": 1.9448, "step": 271 },
    { "epoch": 1.3, "learning_rate": 0.000133976833976834, "loss": 1.8598, "step": 272 },
    { "epoch": 1.31, "learning_rate": 0.0001335907335907336, "loss": 2.0792, "step": 273 },
    { "epoch": 1.31, "learning_rate": 0.0001332046332046332, "loss": 1.7766, "step": 274 },
    { "epoch": 1.32, "learning_rate": 0.00013281853281853283, "loss": 1.9329, "step": 275 },
    { "epoch": 1.32, "learning_rate": 0.00013243243243243243, "loss": 1.9933, "step": 276 },
    { "epoch": 1.33, "learning_rate": 0.00013204633204633206, "loss": 1.9371, "step": 277 },
    { "epoch": 1.33, "learning_rate": 0.00013166023166023166, "loss": 1.9629, "step": 278 },
    { "epoch": 1.33, "learning_rate": 0.00013127413127413129, "loss": 2.0488, "step": 279 },
    { "epoch": 1.33, "eval_loss": 3.333745241165161, "eval_runtime": 7.361, "eval_samples_per_second": 163.157, "eval_steps_per_second": 54.476, "step": 279 },
    { "epoch": 1.34, "learning_rate": 0.0001308880308880309, "loss": 2.0148, "step": 280 },
    { "epoch": 1.34, "learning_rate": 0.00013050193050193052, "loss": 1.8416, "step": 281 },
    { "epoch": 1.35, "learning_rate": 0.00013011583011583012, "loss": 2.1004, "step": 282 },
    { "epoch": 1.35, "learning_rate": 0.00012972972972972974, "loss": 1.8308, "step": 283 },
    { "epoch": 1.36, "learning_rate": 0.00012934362934362935, "loss": 1.9441, "step": 284 },
    { "epoch": 1.36, "learning_rate": 0.00012895752895752897, "loss": 2.083, "step": 285 },
    { "epoch": 1.37, "learning_rate": 0.00012857142857142858, "loss": 1.8198, "step": 286 },
    { "epoch": 1.37, "learning_rate": 0.0001281853281853282, "loss": 2.0069, "step": 287 },
    { "epoch": 1.38, "learning_rate": 0.0001277992277992278, "loss": 2.0146, "step": 288 },
    { "epoch": 1.38, "learning_rate": 0.0001274131274131274, "loss": 1.8554, "step": 289 },
    { "epoch": 1.39, "learning_rate": 0.00012702702702702703, "loss": 1.972, "step": 290 },
    { "epoch": 1.39, "learning_rate": 0.00012664092664092664, "loss": 1.9583, "step": 291 },
    { "epoch": 1.4, "learning_rate": 0.00012625482625482626, "loss": 1.8567, "step": 292 },
    { "epoch": 1.4, "learning_rate": 0.00012586872586872587, "loss": 2.0031, "step": 293 },
    { "epoch": 1.41, "learning_rate": 0.0001254826254826255, "loss": 1.9725, "step": 294 },
    { "epoch": 1.41, "learning_rate": 0.0001250965250965251, "loss": 1.9517, "step": 295 },
    { "epoch": 1.42, "learning_rate": 0.00012471042471042472, "loss": 1.7436, "step": 296 },
    { "epoch": 1.42, "learning_rate": 0.00012432432432432433, "loss": 1.9968, "step": 297 },
    { "epoch": 1.43, "learning_rate": 0.00012393822393822395, "loss": 1.8299, "step": 298 },
    { "epoch": 1.43, "learning_rate": 0.00012355212355212355, "loss": 2.1024, "step": 299 },
    { "epoch": 1.44, "learning_rate": 0.00012316602316602318, "loss": 1.8099, "step": 300 },
    { "epoch": 1.44, "learning_rate": 0.00012277992277992278, "loss": 1.9761, "step": 301 },
    { "epoch": 1.45, "learning_rate": 0.0001223938223938224, "loss": 2.1201, "step": 302 },
    { "epoch": 1.45, "learning_rate": 0.00012200772200772201, "loss": 1.9268, "step": 303 },
    { "epoch": 1.46, "learning_rate": 0.00012162162162162163, "loss": 1.8136, "step": 304 },
    { "epoch": 1.46, "learning_rate": 0.00012123552123552124, "loss": 2.0362, "step": 305 },
    { "epoch": 1.47, "learning_rate": 0.00012084942084942086, "loss": 2.0653, "step": 306 },
    { "epoch": 1.47, "learning_rate": 0.00012046332046332047, "loss": 2.022, "step": 307 },
    { "epoch": 1.48, "learning_rate": 0.00012007722007722009, "loss": 1.9317, "step": 308 },
    { "epoch": 1.48, "learning_rate": 0.0001196911196911197, "loss": 2.0455, "step": 309 },
    { "epoch": 1.49, "learning_rate": 0.00011930501930501932, "loss": 2.0812, "step": 310 },
    { "epoch": 1.49, "eval_loss": 3.341298818588257, "eval_runtime": 7.3644, "eval_samples_per_second": 163.081, "eval_steps_per_second": 54.451, "step": 310 },
    { "epoch": 1.49, "learning_rate": 0.00011891891891891893, "loss": 2.0609, "step": 311 },
    { "epoch": 1.5, "learning_rate": 0.00011853281853281855, "loss": 1.9708, "step": 312 },
    { "epoch": 1.5, "learning_rate": 0.00011814671814671816, "loss": 1.9968, "step": 313 },
    { "epoch": 1.5, "learning_rate": 0.00011776061776061778, "loss": 2.0283, "step": 314 },
    { "epoch": 1.51, "learning_rate": 0.00011737451737451739, "loss": 2.0142, "step": 315 },
    { "epoch": 1.51, "learning_rate": 0.00011698841698841701, "loss": 2.026, "step": 316 },
    { "epoch": 1.52, "learning_rate": 0.0001166023166023166, "loss": 2.1965, "step": 317 },
    { "epoch": 1.52, "learning_rate": 0.00011621621621621621, "loss": 1.984, "step": 318 },
    { "epoch": 1.53, "learning_rate": 0.00011583011583011582, "loss": 2.0699, "step": 319 },
    { "epoch": 1.53, "learning_rate": 0.00011544401544401544, "loss": 1.864, "step": 320 },
    { "epoch": 1.54, "learning_rate": 0.00011505791505791505, "loss": 2.0219, "step": 321 },
    { "epoch": 1.54, "learning_rate": 0.00011467181467181467, "loss": 1.9162, "step": 322 },
    { "epoch": 1.55, "learning_rate": 0.00011428571428571428, "loss": 1.9092, "step": 323 },
    { "epoch": 1.55, "learning_rate": 0.0001138996138996139, "loss": 2.0932, "step": 324 },
    { "epoch": 1.56, "learning_rate": 0.00011351351351351351, "loss": 2.0975, "step": 325 },
    { "epoch": 1.56, "learning_rate": 0.00011312741312741313, "loss": 2.1674, "step": 326 },
    { "epoch": 1.57, "learning_rate": 0.00011274131274131274, "loss": 1.8444, "step": 327 },
    { "epoch": 1.57, "learning_rate": 0.00011235521235521236, "loss": 1.9696, "step": 328 },
    { "epoch": 1.58, "learning_rate": 0.00011196911196911197, "loss": 1.943, "step": 329 },
    { "epoch": 1.58, "learning_rate": 0.00011158301158301159, "loss": 2.1044, "step": 330 },
    { "epoch": 1.59, "learning_rate": 0.0001111969111969112, "loss": 2.2068, "step": 331 },
    { "epoch": 1.59, "learning_rate": 0.00011081081081081082, "loss": 2.0958, "step": 332 },
    { "epoch": 1.6, "learning_rate": 0.00011042471042471043, "loss": 1.9789, "step": 333 },
    { "epoch": 1.6, "learning_rate": 0.00011003861003861005, "loss": 1.8663, "step": 334 },
    { "epoch": 1.61, "learning_rate": 0.00010965250965250966, "loss": 2.0499, "step": 335 },
    { "epoch": 1.61, "learning_rate": 0.00010926640926640928, "loss": 1.935, "step": 336 },
    { "epoch": 1.62, "learning_rate": 0.00010888030888030889, "loss": 2.0021, "step": 337 },
    { "epoch": 1.62, "learning_rate": 0.0001084942084942085, "loss": 1.953, "step": 338 },
    { "epoch": 1.63, "learning_rate": 0.00010810810810810812, "loss": 2.0466, "step": 339 },
    { "epoch": 1.63, "learning_rate": 0.00010772200772200774, "loss": 1.9709, "step": 340 },
    { "epoch": 1.64, "learning_rate": 0.00010733590733590735, "loss": 1.8884, "step": 341 },
    { "epoch": 1.64, "eval_loss": 3.323758602142334, "eval_runtime": 7.3665, "eval_samples_per_second": 163.036, "eval_steps_per_second": 54.436, "step": 341 },
    { "epoch": 1.64, "learning_rate": 0.00010694980694980697, "loss": 2.0557, "step": 342 },
    { "epoch": 1.65, "learning_rate": 0.00010656370656370658, "loss": 2.0345, "step": 343 },
    { "epoch": 1.65, "learning_rate": 0.0001061776061776062, "loss": 1.8173, "step": 344 },
    { "epoch": 1.66, "learning_rate": 0.00010579150579150581, "loss": 1.9598, "step": 345 },
    { "epoch": 1.66, "learning_rate": 0.0001054054054054054, "loss": 2.0323, "step": 346 },
    { "epoch": 1.67, "learning_rate": 0.00010501930501930501, "loss": 1.9284, "step": 347 },
    { "epoch": 1.67, "learning_rate": 0.00010463320463320463, "loss": 2.1235, "step": 348 },
    { "epoch": 1.67, "learning_rate": 0.00010424710424710424, "loss": 1.9426, "step": 349 },
    { "epoch": 1.68, "learning_rate": 0.00010386100386100386, "loss": 1.8692, "step": 350 },
    { "epoch": 1.68, "learning_rate": 0.00010347490347490347, "loss": 1.9559, "step": 351 },
    { "epoch": 1.69, "learning_rate": 0.00010308880308880309, "loss": 2.0407, "step": 352 },
    { "epoch": 1.69, "learning_rate": 0.0001027027027027027, "loss": 1.9356, "step": 353 },
    { "epoch": 1.7, "learning_rate": 0.00010231660231660232, "loss": 1.9055, "step": 354 },
    { "epoch": 1.7, "learning_rate": 0.00010193050193050193, "loss": 1.9776, "step": 355 },
    { "epoch": 1.71, "learning_rate": 0.00010154440154440155, "loss": 1.8996, "step": 356 },
    { "epoch": 1.71, "learning_rate": 0.00010115830115830116, "loss": 1.8893, "step": 357 },
    { "epoch": 1.72, "learning_rate": 0.00010077220077220078, "loss": 1.9621, "step": 358 },
    { "epoch": 1.72, "learning_rate": 0.00010038610038610039, "loss": 1.9447, "step": 359 },
    { "epoch": 1.73, "learning_rate": 0.0001, "loss": 1.9646, "step": 360 },
    { "epoch": 1.73, "learning_rate": 9.961389961389962e-05, "loss": 1.9459, "step": 361 },
    { "epoch": 1.74, "learning_rate": 9.922779922779923e-05, "loss": 2.0168, "step": 362 },
    { "epoch": 1.74, "learning_rate": 9.884169884169885e-05, "loss": 2.0021, "step": 363 },
    { "epoch": 1.75, "learning_rate": 9.845559845559846e-05, "loss": 1.7642, "step": 364 },
    { "epoch": 1.75, "learning_rate": 9.806949806949808e-05, "loss": 2.0138, "step": 365 },
    { "epoch": 1.76, "learning_rate": 9.76833976833977e-05, "loss": 2.0108, "step": 366 },
    { "epoch": 1.76, "learning_rate": 9.729729729729731e-05, "loss": 2.0955, "step": 367 },
    { "epoch": 1.77, "learning_rate": 9.691119691119691e-05, "loss": 2.1705, "step": 368 },
    { "epoch": 1.77, "learning_rate": 9.652509652509652e-05, "loss": 1.8494, "step": 369 },
    { "epoch": 1.78, "learning_rate": 9.613899613899614e-05, "loss": 1.8676, "step": 370 },
    { "epoch": 1.78, "learning_rate": 9.575289575289575e-05, "loss": 1.821, "step": 371 },
    { "epoch": 1.79, "learning_rate": 9.536679536679537e-05, "loss": 2.0281, "step": 372 },
    { "epoch": 1.79, "eval_loss": 3.2718801498413086, "eval_runtime": 7.3652, "eval_samples_per_second": 163.063, "eval_steps_per_second": 54.445, "step": 372 },
    { "epoch": 1.79, "learning_rate": 9.498069498069498e-05, "loss": 2.1556, "step": 373 },
    { "epoch": 1.8, "learning_rate": 9.45945945945946e-05, "loss": 1.9643, "step": 374 },
    { "epoch": 1.8, "learning_rate": 9.420849420849421e-05, "loss": 2.0287, "step": 375 },
    { "epoch": 1.81, "learning_rate": 9.382239382239383e-05, "loss": 1.927, "step": 376 },
    { "epoch": 1.81, "learning_rate": 9.343629343629344e-05, "loss": 1.9838, "step": 377 },
    { "epoch": 1.82, "learning_rate": 9.305019305019306e-05, "loss": 1.9065, "step": 378 },
    { "epoch": 1.82, "learning_rate": 9.266409266409267e-05, "loss": 2.056, "step": 379 },
    { "epoch": 1.83, "learning_rate": 9.227799227799229e-05, "loss": 1.9277, "step": 380 },
    { "epoch": 1.83, "learning_rate": 9.18918918918919e-05, "loss": 1.7797, "step": 381 },
    { "epoch": 1.83, "learning_rate": 9.15057915057915e-05, "loss": 1.8818, "step": 382 },
    { "epoch": 1.84, "learning_rate": 9.111969111969112e-05, "loss": 1.947, "step": 383 },
    { "epoch": 1.84, "learning_rate": 9.073359073359073e-05, "loss": 1.942, "step": 384 },
    { "epoch": 1.85, "learning_rate": 9.034749034749035e-05, "loss": 1.9998, "step": 385 },
    { "epoch": 1.85, "learning_rate": 8.996138996138996e-05, "loss": 1.8805, "step": 386 },
    { "epoch": 1.86, "learning_rate": 8.957528957528958e-05, "loss": 1.8903, "step": 387 },
    { "epoch": 1.86, "learning_rate": 8.918918918918919e-05, "loss": 1.9189, "step": 388 },
    { "epoch": 1.87, "learning_rate": 8.880308880308881e-05, "loss": 2.0308, "step": 389 },
    { "epoch": 1.87, "learning_rate": 8.841698841698842e-05, "loss": 2.0768, "step": 390 },
    { "epoch": 1.88, "learning_rate": 8.803088803088804e-05, "loss": 1.9168, "step": 391 },
    { "epoch": 1.88, "learning_rate": 8.764478764478765e-05, "loss": 1.8967, "step": 392 },
    { "epoch": 1.89, "learning_rate": 8.725868725868727e-05, "loss": 1.9347, "step": 393 },
    { "epoch": 1.89, "learning_rate": 8.687258687258688e-05, "loss": 1.8273, "step": 394 },
    { "epoch": 1.9, "learning_rate": 8.64864864864865e-05, "loss": 1.9801, "step": 395 },
    { "epoch": 1.9, "learning_rate": 8.61003861003861e-05, "loss": 2.0002, "step": 396 },
    { "epoch": 1.91, "learning_rate": 8.571428571428571e-05, "loss": 2.0318, "step": 397 },
    { "epoch": 1.91, "learning_rate": 8.532818532818533e-05, "loss": 1.8399, "step": 398 },
    { "epoch": 1.92, "learning_rate": 8.494208494208494e-05, "loss": 1.8956, "step": 399 },
    { "epoch": 1.92, "learning_rate": 8.455598455598456e-05, "loss": 2.0156, "step": 400 },
    { "epoch": 1.93, "learning_rate": 8.416988416988417e-05, "loss": 1.9499, "step": 401 },
    { "epoch": 1.93, "learning_rate": 8.378378378378379e-05, "loss": 1.8823, "step": 402 },
    { "epoch": 1.94, "learning_rate": 8.33976833976834e-05, "loss": 2.1344, "step": 403 },
    { "epoch": 1.94, "eval_loss": 3.2487306594848633, "eval_runtime": 7.3645, "eval_samples_per_second": 163.08, "eval_steps_per_second": 54.451, "step": 403 },
    { "epoch": 1.94, "learning_rate": 8.301158301158302e-05, "loss": 1.9887, "step": 404 },
    { "epoch": 1.95, "learning_rate": 8.262548262548263e-05, "loss": 2.0445, "step": 405 },
    { "epoch": 1.95, "learning_rate": 8.223938223938225e-05, "loss": 1.8847, "step": 406 },
    { "epoch": 1.96, "learning_rate": 8.185328185328186e-05, "loss": 1.8461, "step": 407 },
    { "epoch": 1.96, "learning_rate": 8.146718146718148e-05, "loss": 1.9106, "step": 408 },
    { "epoch": 1.97, "learning_rate": 8.108108108108109e-05, "loss": 2.0067, "step": 409 },
    { "epoch": 1.97, "learning_rate": 8.06949806949807e-05, "loss": 1.9705, "step": 410 },
    { "epoch": 1.98, "learning_rate": 8.03088803088803e-05, "loss": 1.8092, "step": 411 },
    { "epoch": 1.98, "learning_rate": 7.992277992277992e-05, "loss": 1.8563, "step": 412 },
    { "epoch": 1.99, "learning_rate": 7.953667953667954e-05, "loss": 1.8833, "step": 413 },
    { "epoch": 1.99, "learning_rate": 7.915057915057915e-05, "loss": 1.9905, "step": 414 },
    { "epoch": 2.0, "learning_rate": 7.876447876447877e-05, "loss": 2.0448, "step": 415 },
    { "epoch": 2.0, "learning_rate": 7.837837837837838e-05, "loss": 1.9066, "step": 416 },
    { "epoch": 2.0, "learning_rate": 7.7992277992278e-05, "loss": 1.8585, "step": 417 },
    { "epoch": 2.01, "learning_rate": 7.760617760617761e-05, "loss": 2.0163, "step": 418 },
    { "epoch": 2.01, "learning_rate": 7.722007722007723e-05, "loss": 1.8571, "step": 419 },
    { "epoch": 2.02, "learning_rate": 7.683397683397684e-05, "loss": 2.0083, "step": 420 },
    { "epoch": 2.0, "learning_rate": 7.644787644787645e-05, "loss": 0.6158, "step": 421 },
    { "epoch": 2.01, "learning_rate": 7.606177606177607e-05, "loss": 0.7386, "step": 422 },
    { "epoch": 2.01, "learning_rate": 7.567567567567568e-05, "loss": 0.7067, "step": 423 },
    { "epoch": 2.02, "learning_rate": 7.52895752895753e-05, "loss": 0.6173, "step": 424 },
    { "epoch": 2.02, "learning_rate": 7.49034749034749e-05, "loss": 0.5876, "step": 425 },
    { "epoch": 2.03, "learning_rate": 7.451737451737452e-05, "loss": 0.5948, "step": 426 },
    { "epoch": 2.03, "learning_rate": 7.413127413127413e-05, "loss": 0.5593, "step": 427 },
    { "epoch": 2.04, "learning_rate": 7.374517374517374e-05, "loss": 0.5989, "step": 428 },
    { "epoch": 2.04, "learning_rate": 7.335907335907336e-05, "loss": 0.5699, "step": 429 },
    { "epoch": 2.05, "learning_rate": 7.297297297297297e-05, "loss": 0.5719, "step": 430 },
    { "epoch": 2.05, "learning_rate": 7.258687258687259e-05, "loss": 0.4928, "step": 431 },
    { "epoch": 2.06, "learning_rate": 7.22007722007722e-05, "loss": 0.4713, "step": 432 },
    { "epoch": 2.06, "learning_rate": 7.181467181467182e-05, "loss": 0.6161, "step": 433 },
    { "epoch": 2.07, "learning_rate": 7.142857142857143e-05, "loss": 0.566, "step": 434 },
    { "epoch": 2.07, "eval_loss": 4.280820369720459, "eval_runtime": 7.3682, "eval_samples_per_second": 162.998, "eval_steps_per_second": 54.423, "step": 434 },
    { "epoch": 2.07, "learning_rate": 7.104247104247105e-05, "loss": 0.5182, "step": 435 },
    { "epoch": 2.08, "learning_rate": 7.065637065637066e-05, "loss": 0.6347, "step": 436 },
    { "epoch": 2.08, "learning_rate": 7.027027027027028e-05, "loss": 0.6002, "step": 437 },
    { "epoch": 2.09, "learning_rate": 6.988416988416989e-05, "loss": 0.5696, "step": 438 },
    { "epoch": 2.09, "learning_rate": 6.949806949806951e-05, "loss": 0.5535, "step": 439 },
    { "epoch": 2.1, "learning_rate": 6.911196911196911e-05, "loss": 0.5263, "step": 440 },
    { "epoch": 2.1, "learning_rate": 6.872586872586872e-05, "loss": 0.5342, "step": 441 },
    { "epoch": 2.11, "learning_rate": 6.833976833976834e-05, "loss": 0.4946, "step": 442 },
    { "epoch": 2.11, "learning_rate": 6.795366795366795e-05, "loss": 0.5402, "step": 443 },
    { "epoch": 2.12, "learning_rate": 6.756756756756757e-05, "loss": 0.5005, "step": 444 },
    { "epoch": 2.12, "learning_rate": 6.718146718146718e-05, "loss": 0.6038, "step": 445 },
    { "epoch": 2.13, "learning_rate": 6.67953667953668e-05, "loss": 0.5123, "step": 446 },
    { "epoch": 2.13, "learning_rate": 6.640926640926641e-05, "loss": 0.558, "step": 447 },
    { "epoch": 2.14, "learning_rate": 6.602316602316603e-05, "loss": 0.4858, "step": 448 },
    { "epoch": 2.14, "learning_rate": 6.563706563706564e-05, "loss": 0.6183, "step": 449 },
    { "epoch": 2.15, "learning_rate": 6.525096525096526e-05, "loss": 0.5093, "step": 450 },
    { "epoch": 2.15, "learning_rate": 6.486486486486487e-05, "loss": 0.4336, "step": 451 },
    { "epoch": 2.16, "learning_rate": 6.447876447876449e-05, "loss": 0.653, "step": 452 },
    { "epoch": 2.16, "learning_rate": 6.40926640926641e-05, "loss": 0.5675, "step": 453 },
    { "epoch": 2.17, "learning_rate": 6.37065637065637e-05, "loss": 0.5146, "step": 454 },
    { "epoch": 2.17, "learning_rate": 6.332046332046332e-05, "loss": 0.4988, "step": 455 },
    { "epoch": 2.17, "learning_rate": 6.293436293436293e-05, "loss": 0.5216, "step": 456 },
    { "epoch": 2.18, "learning_rate": 6.254826254826255e-05, "loss": 0.5886, "step": 457 },
    { "epoch": 2.18, "learning_rate": 6.216216216216216e-05, "loss": 0.5856, "step": 458 },
    { "epoch": 2.19, "learning_rate": 6.177606177606178e-05, "loss": 0.4837, "step": 459 },
    { "epoch": 2.19, "learning_rate": 6.138996138996139e-05, "loss": 0.6044, "step": 460 },
    { "epoch": 2.2, "learning_rate": 6.100386100386101e-05, "loss": 0.5276, "step": 461 },
    { "epoch": 2.2, "learning_rate": 6.061776061776062e-05, "loss": 0.4752, "step": 462 },
    { "epoch": 2.21, "learning_rate": 6.023166023166024e-05, "loss": 0.5702, "step": 463 },
    { "epoch": 2.21, "learning_rate": 5.984555984555985e-05, "loss": 0.4758, "step": 464 },
    { "epoch": 2.22, "learning_rate": 5.9459459459459466e-05, "loss": 0.573, "step": 465 },
    { "epoch": 2.22, "eval_loss": 4.131713390350342, "eval_runtime": 7.3741, "eval_samples_per_second": 162.866, "eval_steps_per_second": 54.379, "step": 465 },
    { "epoch": 2.22, "learning_rate": 5.907335907335908e-05, "loss": 0.5373, "step": 466 },
    { "epoch": 2.23, "learning_rate": 5.8687258687258696e-05, "loss": 0.5611, "step": 467 },
    { "epoch": 2.23, "learning_rate": 5.83011583011583e-05, "loss": 0.5744, "step": 468 },
    { "epoch": 2.24, "learning_rate": 5.791505791505791e-05, "loss": 0.4818, "step": 469 },
    { "epoch": 2.24, "learning_rate": 5.752895752895753e-05, "loss": 0.4519, "step": 470 },
    { "epoch": 2.25, "learning_rate": 5.714285714285714e-05, "loss": 0.4295, "step": 471 },
    { "epoch": 2.25, "learning_rate": 5.6756756756756757e-05, "loss": 0.4755, "step": 472 },
    { "epoch": 2.26, "learning_rate": 5.637065637065637e-05, "loss": 0.501, "step": 473 },
    { "epoch": 2.26, "learning_rate": 5.5984555984555986e-05, "loss": 0.449, "step": 474 },
    { "epoch": 2.27, "learning_rate": 5.55984555984556e-05, "loss": 0.4914, "step": 475 },
    { "epoch": 2.27, "learning_rate": 5.5212355212355216e-05, "loss": 0.5153, "step": 476 },
    { "epoch": 2.28, "learning_rate": 5.482625482625483e-05, "loss": 0.5433, "step": 477 },
    { "epoch": 2.28, "learning_rate": 5.4440154440154445e-05, "loss": 0.5248, "step": 478 },
    { "epoch": 2.29, "learning_rate": 5.405405405405406e-05, "loss": 0.5453, "step": 479 },
    { "epoch": 2.29, "learning_rate": 5.3667953667953675e-05, "loss": 0.5288, "step": 480 },
    { "epoch": 2.3, "learning_rate": 5.328185328185329e-05, "loss": 0.532, "step": 481 },
    { "epoch": 2.3, "learning_rate": 5.2895752895752905e-05, "loss": 0.5139, "step": 482 },
    { "epoch": 2.31, "learning_rate": 5.2509652509652506e-05, "loss": 0.5175, "step": 483 },
    { "epoch": 2.31, "learning_rate": 5.212355212355212e-05, "loss": 0.6227, "step": 484 },
    { "epoch": 2.32, "learning_rate": 5.1737451737451736e-05, "loss": 0.567, "step": 485 },
    { "epoch": 2.32, "learning_rate": 5.135135135135135e-05, "loss": 0.5636, "step": 486 },
    { "epoch": 2.33, "learning_rate": 5.0965250965250965e-05, "loss": 0.5367, "step": 487 },
    { "epoch": 2.33, "learning_rate": 5.057915057915058e-05, "loss": 0.6016, "step": 488 },
    { "epoch": 2.33, "learning_rate": 5.0193050193050195e-05, "loss": 0.4492, "step": 489 },
    { "epoch": 2.34, "learning_rate": 4.980694980694981e-05, "loss": 0.5329, "step": 490 },
    { "epoch": 2.34, "learning_rate": 4.9420849420849425e-05, "loss": 0.503, "step": 491 },
    { "epoch": 2.35, "learning_rate": 4.903474903474904e-05, "loss": 0.4799, "step": 492 },
    { "epoch": 2.35, "learning_rate": 4.8648648648648654e-05, "loss": 0.454, "step": 493 },
    { "epoch": 2.36, "learning_rate": 4.826254826254826e-05, "loss": 0.5555, "step": 494 },
    { "epoch": 2.36, "learning_rate": 4.787644787644788e-05, "loss": 0.5925, "step": 495 },
    { "epoch": 2.37, "learning_rate": 4.749034749034749e-05, "loss": 0.5557, "step": 496 },
    { "epoch": 2.37, "eval_loss": 4.199349403381348, "eval_runtime": 7.3742, "eval_samples_per_second": 162.866, "eval_steps_per_second": 54.379, "step": 496 },
    { "epoch": 2.37, "learning_rate": 4.710424710424711e-05, "loss": 0.5157, "step": 497 },
    { "epoch": 2.38, "learning_rate": 4.671814671814672e-05, "loss": 0.5538, "step": 498 },
    { "epoch": 2.38, "learning_rate": 4.6332046332046336e-05, "loss": 0.6174, "step": 499 },
    { "epoch": 2.39, "learning_rate": 4.594594594594595e-05, "loss": 0.4592, "step": 500 },
    { "epoch": 2.39, "learning_rate": 4.555984555984556e-05, "loss": 0.4557, "step": 501 },
    { "epoch": 2.4, "learning_rate": 4.5173745173745174e-05, "loss": 0.5154, "step": 502 },
    { "epoch": 2.4, "learning_rate": 4.478764478764479e-05, "loss": 0.4909, "step": 503 },
    { "epoch": 2.41, "learning_rate": 4.4401544401544404e-05, "loss": 0.4755, "step": 504 },
    { "epoch": 2.41, "learning_rate": 4.401544401544402e-05, "loss": 0.592, "step": 505 },
    { "epoch": 2.42, "learning_rate": 4.3629343629343633e-05, "loss": 0.5014, "step": 506 },
    { "epoch": 2.42, "learning_rate": 4.324324324324325e-05, "loss": 0.4928, "step": 507 },
    { "epoch": 2.43, "learning_rate": 4.2857142857142856e-05, "loss": 0.5352, "step": 508 },
    { "epoch": 2.43, "learning_rate": 4.247104247104247e-05, "loss": 0.5457, "step": 509 },
    { "epoch": 2.44, "learning_rate": 4.2084942084942086e-05, "loss": 0.5182, "step": 510 },
    { "epoch": 2.44, "learning_rate": 4.16988416988417e-05, "loss": 0.527, "step": 511 },
    { "epoch": 2.45, "learning_rate": 4.1312741312741316e-05, "loss": 0.4961, "step": 512 },
    { "epoch": 2.45, "learning_rate": 4.092664092664093e-05, "loss": 0.4988, "step": 513 },
    { "epoch": 2.46, "learning_rate": 4.0540540540540545e-05, "loss": 0.5314, "step": 514 },
    { "epoch": 2.46, "learning_rate": 4.015444015444015e-05, "loss": 0.5523, "step": 515 },
    { "epoch": 2.47, "learning_rate": 3.976833976833977e-05, "loss": 0.4368, "step": 516 },
|
{ |
|
"epoch": 2.47, |
|
"learning_rate": 3.938223938223938e-05, |
|
"loss": 0.5184, |
|
"step": 517 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 3.8996138996139e-05, |
|
"loss": 0.6171, |
|
"step": 518 |
|
}, |
|
{ |
|
"epoch": 2.48, |
|
"learning_rate": 3.861003861003861e-05, |
|
"loss": 0.5357, |
|
"step": 519 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 3.822393822393823e-05, |
|
"loss": 0.5664, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 2.49, |
|
"learning_rate": 3.783783783783784e-05, |
|
"loss": 0.4805, |
|
"step": 521 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 3.745173745173745e-05, |
|
"loss": 0.4562, |
|
"step": 522 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 3.7065637065637065e-05, |
|
"loss": 0.5238, |
|
"step": 523 |
|
}, |
|
{ |
|
"epoch": 2.5, |
|
"learning_rate": 3.667953667953668e-05, |
|
"loss": 0.4338, |
|
"step": 524 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 3.6293436293436295e-05, |
|
"loss": 0.5656, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 2.51, |
|
"learning_rate": 3.590733590733591e-05, |
|
"loss": 0.4496, |
|
"step": 526 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 3.5521235521235524e-05, |
|
"loss": 0.4997, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"eval_loss": 4.19807767868042, |
|
"eval_runtime": 7.3698, |
|
"eval_samples_per_second": 162.962, |
|
"eval_steps_per_second": 54.411, |
|
"step": 527 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 3.513513513513514e-05, |
|
"loss": 0.4531, |
|
"step": 528 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 3.4749034749034754e-05, |
|
"loss": 0.5048, |
|
"step": 529 |
|
}, |
|
{ |
|
"epoch": 2.53, |
|
"learning_rate": 3.436293436293436e-05, |
|
"loss": 0.5195, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 3.397683397683398e-05, |
|
"loss": 0.4885, |
|
"step": 531 |
|
}, |
|
{ |
|
"epoch": 2.54, |
|
"learning_rate": 3.359073359073359e-05, |
|
"loss": 0.6774, |
|
"step": 532 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 3.3204633204633207e-05, |
|
"loss": 0.4755, |
|
"step": 533 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 3.281853281853282e-05, |
|
"loss": 0.5164, |
|
"step": 534 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 3.2432432432432436e-05, |
|
"loss": 0.4748, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 2.56, |
|
"learning_rate": 3.204633204633205e-05, |
|
"loss": 0.5656, |
|
"step": 536 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 3.166023166023166e-05, |
|
"loss": 0.5167, |
|
"step": 537 |
|
}, |
|
{ |
|
"epoch": 2.57, |
|
"learning_rate": 3.1274131274131274e-05, |
|
"loss": 0.5101, |
|
"step": 538 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 3.088803088803089e-05, |
|
"loss": 0.4965, |
|
"step": 539 |
|
}, |
|
{ |
|
"epoch": 2.58, |
|
"learning_rate": 3.0501930501930504e-05, |
|
"loss": 0.5549, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 3.011583011583012e-05, |
|
"loss": 0.4873, |
|
"step": 541 |
|
}, |
|
{ |
|
"epoch": 2.59, |
|
"learning_rate": 2.9729729729729733e-05, |
|
"loss": 0.5093, |
|
"step": 542 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 2.9343629343629348e-05, |
|
"loss": 0.4897, |
|
"step": 543 |
|
}, |
|
{ |
|
"epoch": 2.6, |
|
"learning_rate": 2.8957528957528956e-05, |
|
"loss": 0.5128, |
|
"step": 544 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 2.857142857142857e-05, |
|
"loss": 0.4829, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 2.61, |
|
"learning_rate": 2.8185328185328186e-05, |
|
"loss": 0.4853, |
|
"step": 546 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 2.77992277992278e-05, |
|
"loss": 0.5499, |
|
"step": 547 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 2.7413127413127415e-05, |
|
"loss": 0.59, |
|
"step": 548 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 2.702702702702703e-05, |
|
"loss": 0.5169, |
|
"step": 549 |
|
}, |
|
{ |
|
"epoch": 2.63, |
|
"learning_rate": 2.6640926640926645e-05, |
|
"loss": 0.5642, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.6254826254826253e-05, |
|
"loss": 0.5745, |
|
"step": 551 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.5868725868725868e-05, |
|
"loss": 0.4845, |
|
"step": 552 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 2.5482625482625483e-05, |
|
"loss": 0.5198, |
|
"step": 553 |
|
}, |
|
{ |
|
"epoch": 2.65, |
|
"learning_rate": 2.5096525096525097e-05, |
|
"loss": 0.5402, |
|
"step": 554 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 2.4710424710424712e-05, |
|
"loss": 0.5122, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 2.66, |
|
"learning_rate": 2.4324324324324327e-05, |
|
"loss": 0.5769, |
|
"step": 556 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 2.393822393822394e-05, |
|
"loss": 0.5519, |
|
"step": 557 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 2.3552123552123553e-05, |
|
"loss": 0.465, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"eval_loss": 4.177217483520508, |
|
"eval_runtime": 7.3742, |
|
"eval_samples_per_second": 162.865, |
|
"eval_steps_per_second": 54.379, |
|
"step": 558 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 2.3166023166023168e-05, |
|
"loss": 0.4816, |
|
"step": 559 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 2.277992277992278e-05, |
|
"loss": 0.4428, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 2.68, |
|
"learning_rate": 2.2393822393822394e-05, |
|
"loss": 0.4969, |
|
"step": 561 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 2.200772200772201e-05, |
|
"loss": 0.4891, |
|
"step": 562 |
|
}, |
|
{ |
|
"epoch": 2.69, |
|
"learning_rate": 2.1621621621621624e-05, |
|
"loss": 0.4082, |
|
"step": 563 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 2.1235521235521236e-05, |
|
"loss": 0.4735, |
|
"step": 564 |
|
}, |
|
{ |
|
"epoch": 2.7, |
|
"learning_rate": 2.084942084942085e-05, |
|
"loss": 0.5121, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 2.0463320463320465e-05, |
|
"loss": 0.4696, |
|
"step": 566 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 2.0077220077220077e-05, |
|
"loss": 0.397, |
|
"step": 567 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.969111969111969e-05, |
|
"loss": 0.5271, |
|
"step": 568 |
|
}, |
|
{ |
|
"epoch": 2.72, |
|
"learning_rate": 1.9305019305019306e-05, |
|
"loss": 0.4974, |
|
"step": 569 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 1.891891891891892e-05, |
|
"loss": 0.4814, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 1.8532818532818533e-05, |
|
"loss": 0.5565, |
|
"step": 571 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 1.8146718146718147e-05, |
|
"loss": 0.4737, |
|
"step": 572 |
|
}, |
|
{ |
|
"epoch": 2.74, |
|
"learning_rate": 1.7760617760617762e-05, |
|
"loss": 0.4448, |
|
"step": 573 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.7374517374517377e-05, |
|
"loss": 0.4886, |
|
"step": 574 |
|
}, |
|
{ |
|
"epoch": 2.75, |
|
"learning_rate": 1.698841698841699e-05, |
|
"loss": 0.5197, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 1.6602316602316603e-05, |
|
"loss": 0.4688, |
|
"step": 576 |
|
}, |
|
{ |
|
"epoch": 2.76, |
|
"learning_rate": 1.6216216216216218e-05, |
|
"loss": 0.5649, |
|
"step": 577 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 1.583011583011583e-05, |
|
"loss": 0.5026, |
|
"step": 578 |
|
}, |
|
{ |
|
"epoch": 2.77, |
|
"learning_rate": 1.5444015444015444e-05, |
|
"loss": 0.5832, |
|
"step": 579 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.505791505791506e-05, |
|
"loss": 0.5995, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 2.78, |
|
"learning_rate": 1.4671814671814674e-05, |
|
"loss": 0.5342, |
|
"step": 581 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 1.4285714285714285e-05, |
|
"loss": 0.5465, |
|
"step": 582 |
|
}, |
|
{ |
|
"epoch": 2.79, |
|
"learning_rate": 1.38996138996139e-05, |
|
"loss": 0.5165, |
|
"step": 583 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 1.3513513513513515e-05, |
|
"loss": 0.4594, |
|
"step": 584 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 1.3127413127413127e-05, |
|
"loss": 0.4448, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 1.2741312741312741e-05, |
|
"loss": 0.5148, |
|
"step": 586 |
|
}, |
|
{ |
|
"epoch": 2.81, |
|
"learning_rate": 1.2355212355212356e-05, |
|
"loss": 0.5255, |
|
"step": 587 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.196911196911197e-05, |
|
"loss": 0.4979, |
|
"step": 588 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 1.1583011583011584e-05, |
|
"loss": 0.4531, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"eval_loss": 4.171577453613281, |
|
"eval_runtime": 7.3719, |
|
"eval_samples_per_second": 162.916, |
|
"eval_steps_per_second": 54.396, |
|
"step": 589 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.1196911196911197e-05, |
|
"loss": 0.5339, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.0810810810810812e-05, |
|
"loss": 0.5242, |
|
"step": 591 |
|
}, |
|
{ |
|
"epoch": 2.83, |
|
"learning_rate": 1.0424710424710425e-05, |
|
"loss": 0.5266, |
|
"step": 592 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 1.0038610038610038e-05, |
|
"loss": 0.5188, |
|
"step": 593 |
|
}, |
|
{ |
|
"epoch": 2.84, |
|
"learning_rate": 9.652509652509653e-06, |
|
"loss": 0.459, |
|
"step": 594 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 9.266409266409266e-06, |
|
"loss": 0.3489, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 2.85, |
|
"learning_rate": 8.880308880308881e-06, |
|
"loss": 0.5022, |
|
"step": 596 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 8.494208494208494e-06, |
|
"loss": 0.4513, |
|
"step": 597 |
|
}, |
|
{ |
|
"epoch": 2.86, |
|
"learning_rate": 8.108108108108109e-06, |
|
"loss": 0.4338, |
|
"step": 598 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 7.722007722007722e-06, |
|
"loss": 0.5263, |
|
"step": 599 |
|
}, |
|
{ |
|
"epoch": 2.87, |
|
"learning_rate": 7.335907335907337e-06, |
|
"loss": 0.4898, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 6.94980694980695e-06, |
|
"loss": 0.5212, |
|
"step": 601 |
|
}, |
|
{ |
|
"epoch": 2.88, |
|
"learning_rate": 6.563706563706563e-06, |
|
"loss": 0.5355, |
|
"step": 602 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 6.177606177606178e-06, |
|
"loss": 0.5444, |
|
"step": 603 |
|
}, |
|
{ |
|
"epoch": 2.89, |
|
"learning_rate": 5.791505791505792e-06, |
|
"loss": 0.5875, |
|
"step": 604 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.405405405405406e-06, |
|
"loss": 0.4989, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 5.019305019305019e-06, |
|
"loss": 0.5159, |
|
"step": 606 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.633204633204633e-06, |
|
"loss": 0.3956, |
|
"step": 607 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 4.247104247104247e-06, |
|
"loss": 0.491, |
|
"step": 608 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.861003861003861e-06, |
|
"loss": 0.4454, |
|
"step": 609 |
|
}, |
|
{ |
|
"epoch": 2.92, |
|
"learning_rate": 3.474903474903475e-06, |
|
"loss": 0.4844, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 3.088803088803089e-06, |
|
"loss": 0.4972, |
|
"step": 611 |
|
}, |
|
{ |
|
"epoch": 2.93, |
|
"learning_rate": 2.702702702702703e-06, |
|
"loss": 0.4617, |
|
"step": 612 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 2.3166023166023166e-06, |
|
"loss": 0.4683, |
|
"step": 613 |
|
}, |
|
{ |
|
"epoch": 2.94, |
|
"learning_rate": 1.9305019305019305e-06, |
|
"loss": 0.5815, |
|
"step": 614 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.5444015444015445e-06, |
|
"loss": 0.4502, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 2.95, |
|
"learning_rate": 1.1583011583011583e-06, |
|
"loss": 0.477, |
|
"step": 616 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 7.722007722007723e-07, |
|
"loss": 0.4836, |
|
"step": 617 |
|
}, |
|
{ |
|
"epoch": 2.96, |
|
"learning_rate": 3.8610038610038613e-07, |
|
"loss": 0.4523, |
|
"step": 618 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 618, |
|
"num_train_epochs": 3, |
|
"save_steps": 500, |
|
"total_flos": 3.072196311043277e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |