{
"best_metric": 0.7326732673267327,
"best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-1/checkpoint-16",
"epoch": 9.0,
"eval_steps": 500,
"global_step": 144,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.31,
"grad_norm": 0.8568354845046997,
"learning_rate": 2.8711716200450114e-06,
"loss": 0.6977,
"step": 5
},
{
"epoch": 0.62,
"grad_norm": 0.7982807755470276,
"learning_rate": 5.742343240090023e-06,
"loss": 0.6961,
"step": 10
},
{
"epoch": 0.94,
"grad_norm": 0.32968151569366455,
"learning_rate": 8.613514860135034e-06,
"loss": 0.6801,
"step": 15
},
{
"epoch": 1.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.6840133666992188,
"eval_runtime": 1.3445,
"eval_samples_per_second": 47.602,
"eval_steps_per_second": 5.95,
"step": 16
},
{
"epoch": 1.25,
"grad_norm": 1.6010539531707764,
"learning_rate": 8.932533929028925e-06,
"loss": 0.6633,
"step": 20
},
{
"epoch": 1.56,
"grad_norm": 0.5679539442062378,
"learning_rate": 8.613514860135034e-06,
"loss": 0.6444,
"step": 25
},
{
"epoch": 1.88,
"grad_norm": 0.4244280755519867,
"learning_rate": 8.294495791241145e-06,
"loss": 0.6419,
"step": 30
},
{
"epoch": 2.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.6817398071289062,
"eval_runtime": 1.3677,
"eval_samples_per_second": 46.795,
"eval_steps_per_second": 5.849,
"step": 32
},
{
"epoch": 2.19,
"grad_norm": 1.0901663303375244,
"learning_rate": 7.975476722347254e-06,
"loss": 0.6205,
"step": 35
},
{
"epoch": 2.5,
"grad_norm": 0.7114349603652954,
"learning_rate": 7.656457653453365e-06,
"loss": 0.6425,
"step": 40
},
{
"epoch": 2.81,
"grad_norm": 1.0210336446762085,
"learning_rate": 7.337438584559474e-06,
"loss": 0.5994,
"step": 45
},
{
"epoch": 3.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.6920928955078125,
"eval_runtime": 1.3659,
"eval_samples_per_second": 46.855,
"eval_steps_per_second": 5.857,
"step": 48
},
{
"epoch": 3.12,
"grad_norm": 0.5598271489143372,
"learning_rate": 7.018419515665583e-06,
"loss": 0.5898,
"step": 50
},
{
"epoch": 3.44,
"grad_norm": 0.8800140023231506,
"learning_rate": 6.6994004467716935e-06,
"loss": 0.5947,
"step": 55
},
{
"epoch": 3.75,
"grad_norm": 0.5016202330589294,
"learning_rate": 6.3803813778778036e-06,
"loss": 0.599,
"step": 60
},
{
"epoch": 4.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7060089111328125,
"eval_runtime": 1.3676,
"eval_samples_per_second": 46.796,
"eval_steps_per_second": 5.849,
"step": 64
},
{
"epoch": 4.06,
"grad_norm": 0.6695349812507629,
"learning_rate": 6.061362308983913e-06,
"loss": 0.6271,
"step": 65
},
{
"epoch": 4.38,
"grad_norm": 0.7742494344711304,
"learning_rate": 5.742343240090023e-06,
"loss": 0.5761,
"step": 70
},
{
"epoch": 4.69,
"grad_norm": 0.8342304229736328,
"learning_rate": 5.423324171196133e-06,
"loss": 0.5882,
"step": 75
},
{
"epoch": 5.0,
"grad_norm": 0.9266335964202881,
"learning_rate": 5.104305102302243e-06,
"loss": 0.6051,
"step": 80
},
{
"epoch": 5.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7071685791015625,
"eval_runtime": 1.3778,
"eval_samples_per_second": 46.45,
"eval_steps_per_second": 5.806,
"step": 80
},
{
"epoch": 5.31,
"grad_norm": 0.7613723874092102,
"learning_rate": 4.785286033408353e-06,
"loss": 0.604,
"step": 85
},
{
"epoch": 5.62,
"grad_norm": 0.4511677920818329,
"learning_rate": 4.466266964514462e-06,
"loss": 0.5968,
"step": 90
},
{
"epoch": 5.94,
"grad_norm": 0.5487041473388672,
"learning_rate": 4.147247895620572e-06,
"loss": 0.5792,
"step": 95
},
{
"epoch": 6.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7030601501464844,
"eval_runtime": 1.3948,
"eval_samples_per_second": 45.886,
"eval_steps_per_second": 5.736,
"step": 96
},
{
"epoch": 6.25,
"grad_norm": 0.8304491639137268,
"learning_rate": 3.8282288267266825e-06,
"loss": 0.5467,
"step": 100
},
{
"epoch": 6.56,
"grad_norm": 0.4213624894618988,
"learning_rate": 3.5092097578327917e-06,
"loss": 0.6636,
"step": 105
},
{
"epoch": 6.88,
"grad_norm": 0.8720755577087402,
"learning_rate": 3.1901906889389018e-06,
"loss": 0.5839,
"step": 110
},
{
"epoch": 7.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.70111083984375,
"eval_runtime": 1.3628,
"eval_samples_per_second": 46.964,
"eval_steps_per_second": 5.87,
"step": 112
},
{
"epoch": 7.19,
"grad_norm": 0.5998705625534058,
"learning_rate": 2.8711716200450114e-06,
"loss": 0.5257,
"step": 115
},
{
"epoch": 7.5,
"grad_norm": 1.3016043901443481,
"learning_rate": 2.5521525511511215e-06,
"loss": 0.5912,
"step": 120
},
{
"epoch": 7.81,
"grad_norm": 0.46050915122032166,
"learning_rate": 2.233133482257231e-06,
"loss": 0.5738,
"step": 125
},
{
"epoch": 8.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7000617980957031,
"eval_runtime": 1.3424,
"eval_samples_per_second": 47.676,
"eval_steps_per_second": 5.96,
"step": 128
},
{
"epoch": 8.12,
"grad_norm": 0.729812502861023,
"learning_rate": 1.9141144133633412e-06,
"loss": 0.5742,
"step": 130
},
{
"epoch": 8.44,
"grad_norm": 0.5653175711631775,
"learning_rate": 1.5950953444694509e-06,
"loss": 0.5029,
"step": 135
},
{
"epoch": 8.75,
"grad_norm": 0.6748834252357483,
"learning_rate": 1.2760762755755608e-06,
"loss": 0.5754,
"step": 140
},
{
"epoch": 9.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7019309997558594,
"eval_runtime": 1.5088,
"eval_samples_per_second": 42.418,
"eval_steps_per_second": 5.302,
"step": 144
}
],
"logging_steps": 5,
"max_steps": 160,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 7846324158364704.0,
"train_batch_size": 12,
"trial_name": null,
"trial_params": {
"learning_rate": 9.187749184144037e-06,
"per_device_train_batch_size": 12
}
}
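
A minimal sketch of how a trainer_state.json like the one above could be inspected offline, assuming it has been saved locally under the hypothetical filename "trainer_state.json"; only the Python standard library is used, and the field names match those in the log above.

import json

# Load the downloaded trainer state (hypothetical local path).
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best_metric (eval_f1): {state['best_metric']}")
print(f"best checkpoint:       {state['best_model_checkpoint']}")

# Per-epoch evaluation entries are the log_history items that carry an
# "eval_loss" key; print a compact summary of how eval loss evolved while
# eval_f1 stayed flat at 0.7327.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"epoch {entry['epoch']:>4}: "
            f"eval_f1={entry['eval_f1']:.4f}  "
            f"eval_loss={entry['eval_loss']:.4f}  "
            f"step={entry['step']}"
        )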