{
"best_metric": 0.7474747474747475,
"best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-13/checkpoint-192",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 192,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 1.2512309551239014,
"learning_rate": 1.034188035217011e-05,
"loss": 0.7013,
"step": 5
},
{
"epoch": 0.21,
"grad_norm": 1.0938308238983154,
"learning_rate": 2.068376070434022e-05,
"loss": 0.6709,
"step": 10
},
{
"epoch": 0.31,
"grad_norm": 1.029103398323059,
"learning_rate": 3.102564105651033e-05,
"loss": 0.676,
"step": 15
},
{
"epoch": 0.42,
"grad_norm": 0.7623715400695801,
"learning_rate": 4.136752140868044e-05,
"loss": 0.6371,
"step": 20
},
{
"epoch": 0.52,
"grad_norm": 0.6755096912384033,
"learning_rate": 5.170940176085055e-05,
"loss": 0.6148,
"step": 25
},
{
"epoch": 0.62,
"grad_norm": 0.48364195227622986,
"learning_rate": 6.205128211302065e-05,
"loss": 0.609,
"step": 30
},
{
"epoch": 0.73,
"grad_norm": 3.724107503890991,
"learning_rate": 7.239316246519077e-05,
"loss": 0.5257,
"step": 35
},
{
"epoch": 0.83,
"grad_norm": 1.4992948770523071,
"learning_rate": 8.273504281736088e-05,
"loss": 0.7212,
"step": 40
},
{
"epoch": 0.94,
"grad_norm": 4.220996856689453,
"learning_rate": 9.307692316953099e-05,
"loss": 0.6503,
"step": 45
},
{
"epoch": 1.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7372970581054688,
"eval_runtime": 1.384,
"eval_samples_per_second": 46.242,
"eval_steps_per_second": 5.78,
"step": 48
},
{
"epoch": 1.04,
"grad_norm": 4.6990180015563965,
"learning_rate": 9.882241225406994e-05,
"loss": 0.6895,
"step": 50
},
{
"epoch": 1.15,
"grad_norm": 1.5386626720428467,
"learning_rate": 9.767331443716214e-05,
"loss": 0.6802,
"step": 55
},
{
"epoch": 1.25,
"grad_norm": 1.7925231456756592,
"learning_rate": 9.652421662025435e-05,
"loss": 0.4231,
"step": 60
},
{
"epoch": 1.35,
"grad_norm": 1.0101344585418701,
"learning_rate": 9.560493836672811e-05,
"loss": 0.5768,
"step": 65
},
{
"epoch": 1.46,
"grad_norm": 1.2472072839736938,
"learning_rate": 9.445584054982033e-05,
"loss": 0.5095,
"step": 70
},
{
"epoch": 1.56,
"grad_norm": 3.8222692012786865,
"learning_rate": 9.330674273291253e-05,
"loss": 0.8618,
"step": 75
},
{
"epoch": 1.67,
"grad_norm": 2.367830276489258,
"learning_rate": 9.215764491600475e-05,
"loss": 0.6321,
"step": 80
},
{
"epoch": 1.77,
"grad_norm": 1.2242908477783203,
"learning_rate": 9.100854709909696e-05,
"loss": 0.6086,
"step": 85
},
{
"epoch": 1.88,
"grad_norm": 1.1424415111541748,
"learning_rate": 8.985944928218916e-05,
"loss": 0.6172,
"step": 90
},
{
"epoch": 1.98,
"grad_norm": 1.9207895994186401,
"learning_rate": 8.871035146528138e-05,
"loss": 0.6318,
"step": 95
},
{
"epoch": 2.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.6706695556640625,
"eval_runtime": 1.3744,
"eval_samples_per_second": 46.567,
"eval_steps_per_second": 5.821,
"step": 96
},
{
"epoch": 2.08,
"grad_norm": 1.3165736198425293,
"learning_rate": 8.756125364837358e-05,
"loss": 0.6359,
"step": 100
},
{
"epoch": 2.19,
"grad_norm": 1.2420493364334106,
"learning_rate": 8.64121558314658e-05,
"loss": 0.5142,
"step": 105
},
{
"epoch": 2.29,
"grad_norm": 1.2290922403335571,
"learning_rate": 8.526305801455801e-05,
"loss": 0.643,
"step": 110
},
{
"epoch": 2.4,
"grad_norm": 1.1532477140426636,
"learning_rate": 8.434377976103179e-05,
"loss": 0.6774,
"step": 115
},
{
"epoch": 2.5,
"grad_norm": 6.360584259033203,
"learning_rate": 8.319468194412399e-05,
"loss": 0.5839,
"step": 120
},
{
"epoch": 2.6,
"grad_norm": 7.5169677734375,
"learning_rate": 8.204558412721619e-05,
"loss": 0.5138,
"step": 125
},
{
"epoch": 2.71,
"grad_norm": 14.515143394470215,
"learning_rate": 8.089648631030841e-05,
"loss": 0.6207,
"step": 130
},
{
"epoch": 2.81,
"grad_norm": 2.682206153869629,
"learning_rate": 7.974738849340061e-05,
"loss": 0.4247,
"step": 135
},
{
"epoch": 2.92,
"grad_norm": 5.204345226287842,
"learning_rate": 7.88281102398744e-05,
"loss": 0.3698,
"step": 140
},
{
"epoch": 3.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.9202766418457031,
"eval_runtime": 1.4182,
"eval_samples_per_second": 45.129,
"eval_steps_per_second": 5.641,
"step": 144
},
{
"epoch": 3.02,
"grad_norm": 6.481989860534668,
"learning_rate": 7.76790124229666e-05,
"loss": 0.6196,
"step": 145
},
{
"epoch": 3.12,
"grad_norm": Infinity,
"learning_rate": 7.675973416944037e-05,
"loss": 0.4192,
"step": 150
},
{
"epoch": 3.23,
"grad_norm": 2.9529616832733154,
"learning_rate": 7.561063635253257e-05,
"loss": 0.2996,
"step": 155
},
{
"epoch": 3.33,
"grad_norm": 13.315085411071777,
"learning_rate": 7.469135809900635e-05,
"loss": 0.6014,
"step": 160
},
{
"epoch": 3.44,
"grad_norm": 3.830242156982422,
"learning_rate": 7.354226028209855e-05,
"loss": 0.2117,
"step": 165
},
{
"epoch": 3.54,
"grad_norm": 26.10947608947754,
"learning_rate": 7.239316246519077e-05,
"loss": 0.2918,
"step": 170
},
{
"epoch": 3.65,
"grad_norm": 55.73325729370117,
"learning_rate": 7.124406464828297e-05,
"loss": 0.775,
"step": 175
},
{
"epoch": 3.75,
"grad_norm": 1.9401394128799438,
"learning_rate": 7.009496683137518e-05,
"loss": 0.4163,
"step": 180
},
{
"epoch": 3.85,
"grad_norm": 0.818503201007843,
"learning_rate": 6.89458690144674e-05,
"loss": 0.3559,
"step": 185
},
{
"epoch": 3.96,
"grad_norm": 6.375278472900391,
"learning_rate": 6.77967711975596e-05,
"loss": 0.5548,
"step": 190
},
{
"epoch": 4.0,
"eval_f1": 0.7474747474747475,
"eval_loss": 1.170898675918579,
"eval_runtime": 1.3939,
"eval_samples_per_second": 45.913,
"eval_steps_per_second": 5.739,
"step": 192
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 2891755054954176.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"learning_rate": 9.928205138083305e-05,
"per_device_train_batch_size": 4
}
}