{
  "best_metric": 0.7326732673267327,
  "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-4/checkpoint-96",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 192,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 1.2733114957809448,
      "learning_rate": 2.2702186710865246e-07,
      "loss": 0.7025,
      "step": 5
    },
    {
      "epoch": 0.21,
      "grad_norm": 1.243804931640625,
      "learning_rate": 4.5404373421730493e-07,
      "loss": 0.6974,
      "step": 10
    },
    {
      "epoch": 0.31,
      "grad_norm": 1.7711552381515503,
      "learning_rate": 6.810656013259573e-07,
      "loss": 0.696,
      "step": 15
    },
    {
      "epoch": 0.42,
      "grad_norm": 1.1453403234481812,
      "learning_rate": 9.080874684346099e-07,
      "loss": 0.6989,
      "step": 20
    },
    {
      "epoch": 0.52,
      "grad_norm": 1.2729355096817017,
      "learning_rate": 1.1351093355432624e-06,
      "loss": 0.6968,
      "step": 25
    },
    {
      "epoch": 0.62,
      "grad_norm": 1.1592165231704712,
      "learning_rate": 1.3621312026519146e-06,
      "loss": 0.6959,
      "step": 30
    },
    {
      "epoch": 0.73,
      "grad_norm": 1.1798148155212402,
      "learning_rate": 1.589153069760567e-06,
      "loss": 0.6952,
      "step": 35
    },
    {
      "epoch": 0.83,
      "grad_norm": 2.1216671466827393,
      "learning_rate": 1.8161749368692197e-06,
      "loss": 0.6886,
      "step": 40
    },
    {
      "epoch": 0.94,
      "grad_norm": 1.3416370153427124,
      "learning_rate": 2.043196803977872e-06,
      "loss": 0.6864,
      "step": 45
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.72,
      "eval_loss": 0.688262939453125,
      "eval_runtime": 1.3468,
      "eval_samples_per_second": 47.521,
      "eval_steps_per_second": 5.94,
      "step": 48
    },
    {
      "epoch": 1.04,
      "grad_norm": 2.1856281757354736,
      "learning_rate": 2.169320063482679e-06,
      "loss": 0.6917,
      "step": 50
    },
    {
      "epoch": 1.15,
      "grad_norm": 1.4077153205871582,
      "learning_rate": 2.1440954115817176e-06,
      "loss": 0.6884,
      "step": 55
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.1792664527893066,
      "learning_rate": 2.1188707596807562e-06,
      "loss": 0.6668,
      "step": 60
    },
    {
      "epoch": 1.35,
      "grad_norm": 1.0386197566986084,
      "learning_rate": 2.093646107779795e-06,
      "loss": 0.6694,
      "step": 65
    },
    {
      "epoch": 1.46,
      "grad_norm": 2.0565919876098633,
      "learning_rate": 2.0684214558788335e-06,
      "loss": 0.6561,
      "step": 70
    },
    {
      "epoch": 1.56,
      "grad_norm": 1.2978509664535522,
      "learning_rate": 2.043196803977872e-06,
      "loss": 0.6789,
      "step": 75
    },
    {
      "epoch": 1.67,
      "grad_norm": 2.058328628540039,
      "learning_rate": 2.0179721520769108e-06,
      "loss": 0.6633,
      "step": 80
    },
    {
      "epoch": 1.77,
      "grad_norm": 0.6023226976394653,
      "learning_rate": 1.9927475001759494e-06,
      "loss": 0.6655,
      "step": 85
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.5510762929916382,
      "learning_rate": 1.967522848274988e-06,
      "loss": 0.6622,
      "step": 90
    },
    {
      "epoch": 1.98,
      "grad_norm": 1.098602533340454,
      "learning_rate": 1.9422981963740267e-06,
      "loss": 0.6633,
      "step": 95
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6816024780273438,
      "eval_runtime": 1.3765,
      "eval_samples_per_second": 46.493,
      "eval_steps_per_second": 5.812,
      "step": 96
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.9589098691940308,
      "learning_rate": 1.9170735444730654e-06,
      "loss": 0.659,
      "step": 100
    },
    {
      "epoch": 2.19,
      "grad_norm": 1.070695161819458,
      "learning_rate": 1.8918488925721038e-06,
      "loss": 0.6313,
      "step": 105
    },
    {
      "epoch": 2.29,
      "grad_norm": 0.9913639426231384,
      "learning_rate": 1.8666242406711424e-06,
      "loss": 0.6652,
      "step": 110
    },
    {
      "epoch": 2.4,
      "grad_norm": 1.0632878541946411,
      "learning_rate": 1.841399588770181e-06,
      "loss": 0.673,
      "step": 115
    },
    {
      "epoch": 2.5,
      "grad_norm": 2.1036579608917236,
      "learning_rate": 1.8161749368692197e-06,
      "loss": 0.6451,
      "step": 120
    },
    {
      "epoch": 2.6,
      "grad_norm": 1.08384108543396,
      "learning_rate": 1.7909502849682583e-06,
      "loss": 0.6322,
      "step": 125
    },
    {
      "epoch": 2.71,
      "grad_norm": 0.9407000541687012,
      "learning_rate": 1.765725633067297e-06,
      "loss": 0.6755,
      "step": 130
    },
    {
      "epoch": 2.81,
      "grad_norm": 0.9016568660736084,
      "learning_rate": 1.7405009811663356e-06,
      "loss": 0.5985,
      "step": 135
    },
    {
      "epoch": 2.92,
      "grad_norm": 1.1134448051452637,
      "learning_rate": 1.7152763292653743e-06,
      "loss": 0.603,
      "step": 140
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6800689697265625,
      "eval_runtime": 1.3861,
      "eval_samples_per_second": 46.173,
      "eval_steps_per_second": 5.772,
      "step": 144
    },
    {
      "epoch": 3.02,
      "grad_norm": 0.7627719640731812,
      "learning_rate": 1.6900516773644127e-06,
      "loss": 0.6557,
      "step": 145
    },
    {
      "epoch": 3.12,
      "grad_norm": 0.9291415214538574,
      "learning_rate": 1.6648270254634511e-06,
      "loss": 0.6219,
      "step": 150
    },
    {
      "epoch": 3.23,
      "grad_norm": 0.9248765707015991,
      "learning_rate": 1.6396023735624898e-06,
      "loss": 0.6325,
      "step": 155
    },
    {
      "epoch": 3.33,
      "grad_norm": 0.9842573404312134,
      "learning_rate": 1.6143777216615284e-06,
      "loss": 0.6521,
      "step": 160
    },
    {
      "epoch": 3.44,
      "grad_norm": 0.8689214587211609,
      "learning_rate": 1.589153069760567e-06,
      "loss": 0.5929,
      "step": 165
    },
    {
      "epoch": 3.54,
      "grad_norm": 1.0012000799179077,
      "learning_rate": 1.5639284178596057e-06,
      "loss": 0.584,
      "step": 170
    },
    {
      "epoch": 3.65,
      "grad_norm": 0.7438368797302246,
      "learning_rate": 1.5387037659586443e-06,
      "loss": 0.6813,
      "step": 175
    },
    {
      "epoch": 3.75,
      "grad_norm": 1.8603870868682861,
      "learning_rate": 1.513479114057683e-06,
      "loss": 0.6099,
      "step": 180
    },
    {
      "epoch": 3.85,
      "grad_norm": 0.9918416738510132,
      "learning_rate": 1.4882544621567216e-06,
      "loss": 0.6192,
      "step": 185
    },
    {
      "epoch": 3.96,
      "grad_norm": 1.9146322011947632,
      "learning_rate": 1.4630298102557603e-06,
      "loss": 0.6472,
      "step": 190
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6818161010742188,
      "eval_runtime": 1.3841,
      "eval_samples_per_second": 46.239,
      "eval_steps_per_second": 5.78,
      "step": 192
    }
  ],
  "logging_steps": 5,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 2891755054954176.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 2.1794099242430636e-06,
    "per_device_train_batch_size": 4
  }
}