{
"best_metric": 0.8089887640449438,
"best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-16/checkpoint-240",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 240,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 1.2602354288101196,
"learning_rate": 6.163228300166189e-06,
"loss": 0.7018,
"step": 5
},
{
"epoch": 0.21,
"grad_norm": 1.1513686180114746,
"learning_rate": 1.2326456600332378e-05,
"loss": 0.6815,
"step": 10
},
{
"epoch": 0.31,
"grad_norm": 1.21480131149292,
"learning_rate": 1.8489684900498564e-05,
"loss": 0.6818,
"step": 15
},
{
"epoch": 0.42,
"grad_norm": 0.8887011408805847,
"learning_rate": 2.4652913200664756e-05,
"loss": 0.6556,
"step": 20
},
{
"epoch": 0.52,
"grad_norm": 0.8671120405197144,
"learning_rate": 3.081614150083095e-05,
"loss": 0.6284,
"step": 25
},
{
"epoch": 0.62,
"grad_norm": 0.6801092028617859,
"learning_rate": 3.697936980099713e-05,
"loss": 0.6166,
"step": 30
},
{
"epoch": 0.73,
"grad_norm": 2.9359517097473145,
"learning_rate": 4.314259810116332e-05,
"loss": 0.5407,
"step": 35
},
{
"epoch": 0.83,
"grad_norm": 1.6087604761123657,
"learning_rate": 4.930582640132951e-05,
"loss": 0.6755,
"step": 40
},
{
"epoch": 0.94,
"grad_norm": 3.7170913219451904,
"learning_rate": 5.54690547014957e-05,
"loss": 0.6359,
"step": 45
},
{
"epoch": 1.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7614002227783203,
"eval_runtime": 1.3801,
"eval_samples_per_second": 46.373,
"eval_steps_per_second": 5.797,
"step": 48
},
{
"epoch": 1.04,
"grad_norm": 4.872888565063477,
"learning_rate": 5.889307042381025e-05,
"loss": 0.7115,
"step": 50
},
{
"epoch": 1.15,
"grad_norm": 0.9400704503059387,
"learning_rate": 5.820826727934734e-05,
"loss": 0.7024,
"step": 55
},
{
"epoch": 1.25,
"grad_norm": 2.040548086166382,
"learning_rate": 5.752346413488443e-05,
"loss": 0.4111,
"step": 60
},
{
"epoch": 1.35,
"grad_norm": 0.7833982706069946,
"learning_rate": 5.6838660990421516e-05,
"loss": 0.5595,
"step": 65
},
{
"epoch": 1.46,
"grad_norm": 1.4219136238098145,
"learning_rate": 5.615385784595861e-05,
"loss": 0.4831,
"step": 70
},
{
"epoch": 1.56,
"grad_norm": Infinity,
"learning_rate": 5.560601533038828e-05,
"loss": 0.7837,
"step": 75
},
{
"epoch": 1.67,
"grad_norm": 2.069016218185425,
"learning_rate": 5.492121218592537e-05,
"loss": 0.62,
"step": 80
},
{
"epoch": 1.77,
"grad_norm": 1.6181252002716064,
"learning_rate": 5.423640904146246e-05,
"loss": 0.5911,
"step": 85
},
{
"epoch": 1.88,
"grad_norm": 1.8916351795196533,
"learning_rate": 5.355160589699955e-05,
"loss": 0.5905,
"step": 90
},
{
"epoch": 1.98,
"grad_norm": 3.4919614791870117,
"learning_rate": 5.286680275253664e-05,
"loss": 0.5972,
"step": 95
},
{
"epoch": 2.0,
"eval_f1": 0.74,
"eval_loss": 0.6621341705322266,
"eval_runtime": 1.3734,
"eval_samples_per_second": 46.601,
"eval_steps_per_second": 5.825,
"step": 96
},
{
"epoch": 2.08,
"grad_norm": 1.276584267616272,
"learning_rate": 5.231896023696631e-05,
"loss": 0.5767,
"step": 100
},
{
"epoch": 2.19,
"grad_norm": 1.1047593355178833,
"learning_rate": 5.163415709250341e-05,
"loss": 0.4715,
"step": 105
},
{
"epoch": 2.29,
"grad_norm": 1.551737666130066,
"learning_rate": 5.0949353948040496e-05,
"loss": 0.5687,
"step": 110
},
{
"epoch": 2.4,
"grad_norm": 1.3372799158096313,
"learning_rate": 5.0264550803577585e-05,
"loss": 0.5399,
"step": 115
},
{
"epoch": 2.5,
"grad_norm": 4.043832778930664,
"learning_rate": 4.9716708288007255e-05,
"loss": 0.5178,
"step": 120
},
{
"epoch": 2.6,
"grad_norm": 5.607926845550537,
"learning_rate": 4.9031905143544343e-05,
"loss": 0.4201,
"step": 125
},
{
"epoch": 2.71,
"grad_norm": 10.094707489013672,
"learning_rate": 4.834710199908144e-05,
"loss": 0.6339,
"step": 130
},
{
"epoch": 2.81,
"grad_norm": 2.790902614593506,
"learning_rate": 4.766229885461853e-05,
"loss": 0.3564,
"step": 135
},
{
"epoch": 2.92,
"grad_norm": 8.226430892944336,
"learning_rate": 4.6977495710155616e-05,
"loss": 0.2918,
"step": 140
},
{
"epoch": 3.0,
"eval_f1": 0.72,
"eval_loss": 0.8946094512939453,
"eval_runtime": 1.3813,
"eval_samples_per_second": 46.334,
"eval_steps_per_second": 5.792,
"step": 144
},
{
"epoch": 3.02,
"grad_norm": 3.521246910095215,
"learning_rate": 4.6292692565692705e-05,
"loss": 0.4787,
"step": 145
},
{
"epoch": 3.12,
"grad_norm": Infinity,
"learning_rate": 4.574485005012238e-05,
"loss": 0.3851,
"step": 150
},
{
"epoch": 3.23,
"grad_norm": 3.6489925384521484,
"learning_rate": 4.506004690565947e-05,
"loss": 0.2152,
"step": 155
},
{
"epoch": 3.33,
"grad_norm": 9.47097396850586,
"learning_rate": 4.451220439008914e-05,
"loss": 0.6503,
"step": 160
},
{
"epoch": 3.44,
"grad_norm": 1.4761028289794922,
"learning_rate": 4.382740124562623e-05,
"loss": 0.177,
"step": 165
},
{
"epoch": 3.54,
"grad_norm": 15.07903003692627,
"learning_rate": 4.314259810116332e-05,
"loss": 0.3834,
"step": 170
},
{
"epoch": 3.65,
"grad_norm": 98.74039459228516,
"learning_rate": 4.245779495670041e-05,
"loss": 0.816,
"step": 175
},
{
"epoch": 3.75,
"grad_norm": 0.6765172481536865,
"learning_rate": 4.17729918122375e-05,
"loss": 0.3177,
"step": 180
},
{
"epoch": 3.85,
"grad_norm": 1.5212448835372925,
"learning_rate": 4.108818866777459e-05,
"loss": 0.3593,
"step": 185
},
{
"epoch": 3.96,
"grad_norm": 27.127269744873047,
"learning_rate": 4.040338552331168e-05,
"loss": 0.3041,
"step": 190
},
{
"epoch": 4.0,
"eval_f1": 0.7692307692307693,
"eval_loss": 1.0041449069976807,
"eval_runtime": 1.3873,
"eval_samples_per_second": 46.134,
"eval_steps_per_second": 5.767,
"step": 192
},
{
"epoch": 4.06,
"grad_norm": 0.7140729427337646,
"learning_rate": 3.9718582378848774e-05,
"loss": 0.2064,
"step": 195
},
{
"epoch": 4.17,
"grad_norm": 4.367918491363525,
"learning_rate": 3.903377923438586e-05,
"loss": 0.365,
"step": 200
},
{
"epoch": 4.27,
"grad_norm": 0.4135078489780426,
"learning_rate": 3.834897608992295e-05,
"loss": 0.3944,
"step": 205
},
{
"epoch": 4.38,
"grad_norm": 88.720458984375,
"learning_rate": 3.766417294546004e-05,
"loss": 0.3774,
"step": 210
},
{
"epoch": 4.48,
"grad_norm": 1.5230985879898071,
"learning_rate": 3.697936980099713e-05,
"loss": 0.0844,
"step": 215
},
{
"epoch": 4.58,
"grad_norm": Infinity,
"learning_rate": 3.6431527285426805e-05,
"loss": 0.3702,
"step": 220
},
{
"epoch": 4.69,
"grad_norm": 39.243804931640625,
"learning_rate": 3.5746724140963894e-05,
"loss": 0.3381,
"step": 225
},
{
"epoch": 4.79,
"grad_norm": 0.23127637803554535,
"learning_rate": 3.506192099650098e-05,
"loss": 0.03,
"step": 230
},
{
"epoch": 4.9,
"grad_norm": 23.879899978637695,
"learning_rate": 3.451407848093066e-05,
"loss": 0.1969,
"step": 235
},
{
"epoch": 5.0,
"grad_norm": 0.2918482720851898,
"learning_rate": 3.382927533646775e-05,
"loss": 0.2145,
"step": 240
},
{
"epoch": 5.0,
"eval_f1": 0.8089887640449438,
"eval_loss": 1.1479806900024414,
"eval_runtime": 1.3695,
"eval_samples_per_second": 46.731,
"eval_steps_per_second": 5.841,
"step": 240
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 3675891927575280.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"learning_rate": 5.916699168159541e-05,
"per_device_train_batch_size": 4
}
}
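
A minimal sketch of how a Trainer state like the one above could be read back to inspect the per-epoch eval_f1 history and confirm the best metric. This is not part of the original file; the file name "trainer_state.json" and its location are assumptions (it is shown here as it would typically appear inside a checkpoint directory), and Python's json module parses the Infinity grad_norm literals to float("inf") by default.

import json

# Assumed path: the checkpoint directory named in "best_model_checkpoint".
with open("trainer_state.json") as f:
    state = json.load(f)  # Infinity literals are accepted by default (parse_constant is unset)

# Evaluation entries carry "eval_f1"; plain training-loss entries do not.
evals = [e for e in state["log_history"] if "eval_f1" in e]
for e in evals:
    print(f"epoch {e['epoch']:.0f}: eval_f1={e['eval_f1']:.4f}, eval_loss={e['eval_loss']:.4f}")

best = max(evals, key=lambda e: e["eval_f1"])
print("best eval_f1:", best["eval_f1"], "at step", best["step"])
# For this file, the printed value should equal state["best_metric"]
# (0.8089887640449438 at step 240, epoch 5).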