{
  "best_metric": 0.7326732673267327,
  "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-4/checkpoint-96",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 1.2733114957809448,
      "learning_rate": 2.2702186710865246e-07,
      "loss": 0.7025,
      "step": 5
    },
    {
      "epoch": 0.21,
      "grad_norm": 1.243804931640625,
      "learning_rate": 4.5404373421730493e-07,
      "loss": 0.6974,
      "step": 10
    },
    {
      "epoch": 0.31,
      "grad_norm": 1.7711552381515503,
      "learning_rate": 6.810656013259573e-07,
      "loss": 0.696,
      "step": 15
    },
    {
      "epoch": 0.42,
      "grad_norm": 1.1453403234481812,
      "learning_rate": 9.080874684346099e-07,
      "loss": 0.6989,
      "step": 20
    },
    {
      "epoch": 0.52,
      "grad_norm": 1.2729355096817017,
      "learning_rate": 1.1351093355432624e-06,
      "loss": 0.6968,
      "step": 25
    },
    {
      "epoch": 0.62,
      "grad_norm": 1.1592165231704712,
      "learning_rate": 1.3621312026519146e-06,
      "loss": 0.6959,
      "step": 30
    },
    {
      "epoch": 0.73,
      "grad_norm": 1.1798148155212402,
      "learning_rate": 1.589153069760567e-06,
      "loss": 0.6952,
      "step": 35
    },
    {
      "epoch": 0.83,
      "grad_norm": 2.1216671466827393,
      "learning_rate": 1.8161749368692197e-06,
      "loss": 0.6886,
      "step": 40
    },
    {
      "epoch": 0.94,
      "grad_norm": 1.3416370153427124,
      "learning_rate": 2.043196803977872e-06,
      "loss": 0.6864,
      "step": 45
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.72,
      "eval_loss": 0.688262939453125,
      "eval_runtime": 1.3468,
      "eval_samples_per_second": 47.521,
      "eval_steps_per_second": 5.94,
      "step": 48
    },
    {
      "epoch": 1.04,
      "grad_norm": 2.1856281757354736,
      "learning_rate": 2.169320063482679e-06,
      "loss": 0.6917,
      "step": 50
    },
    {
      "epoch": 1.15,
      "grad_norm": 1.4077153205871582,
      "learning_rate": 2.1440954115817176e-06,
      "loss": 0.6884,
      "step": 55
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.1792664527893066,
      "learning_rate": 2.1188707596807562e-06,
      "loss": 0.6668,
      "step": 60
    },
    {
      "epoch": 1.35,
      "grad_norm": 1.0386197566986084,
      "learning_rate": 2.093646107779795e-06,
      "loss": 0.6694,
      "step": 65
    },
    {
      "epoch": 1.46,
      "grad_norm": 2.0565919876098633,
      "learning_rate": 2.0684214558788335e-06,
      "loss": 0.6561,
      "step": 70
    },
    {
      "epoch": 1.56,
      "grad_norm": 1.2978509664535522,
      "learning_rate": 2.043196803977872e-06,
      "loss": 0.6789,
      "step": 75
    },
    {
      "epoch": 1.67,
      "grad_norm": 2.058328628540039,
      "learning_rate": 2.0179721520769108e-06,
      "loss": 0.6633,
      "step": 80
    },
    {
      "epoch": 1.77,
      "grad_norm": 0.6023226976394653,
      "learning_rate": 1.9927475001759494e-06,
      "loss": 0.6655,
      "step": 85
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.5510762929916382,
      "learning_rate": 1.967522848274988e-06,
      "loss": 0.6622,
      "step": 90
    },
    {
      "epoch": 1.98,
      "grad_norm": 1.098602533340454,
      "learning_rate": 1.9422981963740267e-06,
      "loss": 0.6633,
      "step": 95
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6816024780273438,
      "eval_runtime": 1.3765,
      "eval_samples_per_second": 46.493,
      "eval_steps_per_second": 5.812,
      "step": 96
    }
  ],
  "logging_steps": 5,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1442567462539200.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 2.1794099242430636e-06,
    "per_device_train_batch_size": 4
  }
}