{
"best_metric": 0.7865168539325843,
"best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-12/checkpoint-240",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 240,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1,
"grad_norm": 1.2605946063995361,
"learning_rate": 5.868670588038625e-06,
"loss": 0.7019,
"step": 5
},
{
"epoch": 0.21,
"grad_norm": 1.1554573774337769,
"learning_rate": 1.173734117607725e-05,
"loss": 0.6822,
"step": 10
},
{
"epoch": 0.31,
"grad_norm": 1.2631292343139648,
"learning_rate": 1.7606011764115876e-05,
"loss": 0.6823,
"step": 15
},
{
"epoch": 0.42,
"grad_norm": 0.8986643552780151,
"learning_rate": 2.34746823521545e-05,
"loss": 0.6573,
"step": 20
},
{
"epoch": 0.52,
"grad_norm": 0.8833038210868835,
"learning_rate": 2.934335294019313e-05,
"loss": 0.6302,
"step": 25
},
{
"epoch": 0.62,
"grad_norm": 0.6974115967750549,
"learning_rate": 3.521202352823175e-05,
"loss": 0.6181,
"step": 30
},
{
"epoch": 0.73,
"grad_norm": 2.8611502647399902,
"learning_rate": 4.1080694116270374e-05,
"loss": 0.5431,
"step": 35
},
{
"epoch": 0.83,
"grad_norm": 1.6229726076126099,
"learning_rate": 4.6949364704309e-05,
"loss": 0.6722,
"step": 40
},
{
"epoch": 0.94,
"grad_norm": 3.6481218338012695,
"learning_rate": 5.281803529234763e-05,
"loss": 0.6342,
"step": 45
},
{
"epoch": 1.0,
"eval_f1": 0.7326732673267327,
"eval_loss": 0.7596015930175781,
"eval_runtime": 1.3819,
"eval_samples_per_second": 46.313,
"eval_steps_per_second": 5.789,
"step": 48
},
{
"epoch": 1.04,
"grad_norm": 4.841191291809082,
"learning_rate": 5.607840784125798e-05,
"loss": 0.7106,
"step": 50
},
{
"epoch": 1.15,
"grad_norm": 0.9242434501647949,
"learning_rate": 5.542633333147591e-05,
"loss": 0.703,
"step": 55
},
{
"epoch": 1.25,
"grad_norm": 1.9776262044906616,
"learning_rate": 5.4774258821693835e-05,
"loss": 0.4098,
"step": 60
},
{
"epoch": 1.35,
"grad_norm": 0.7563148140907288,
"learning_rate": 5.412218431191177e-05,
"loss": 0.5599,
"step": 65
},
{
"epoch": 1.46,
"grad_norm": 1.44520103931427,
"learning_rate": 5.34701098021297e-05,
"loss": 0.4823,
"step": 70
},
{
"epoch": 1.56,
"grad_norm": Infinity,
"learning_rate": 5.294845019430404e-05,
"loss": 0.7782,
"step": 75
},
{
"epoch": 1.67,
"grad_norm": 2.0363686084747314,
"learning_rate": 5.229637568452197e-05,
"loss": 0.6186,
"step": 80
},
{
"epoch": 1.77,
"grad_norm": 1.6177257299423218,
"learning_rate": 5.16443011747399e-05,
"loss": 0.5907,
"step": 85
},
{
"epoch": 1.88,
"grad_norm": 1.8490890264511108,
"learning_rate": 5.0992226664957835e-05,
"loss": 0.5909,
"step": 90
},
{
"epoch": 1.98,
"grad_norm": 6.961443901062012,
"learning_rate": 5.034015215517576e-05,
"loss": 0.603,
"step": 95
},
{
"epoch": 2.0,
"eval_f1": 0.74,
"eval_loss": 0.6603889465332031,
"eval_runtime": 1.4385,
"eval_samples_per_second": 44.492,
"eval_steps_per_second": 5.561,
"step": 96
},
{
"epoch": 2.08,
"grad_norm": 1.232475757598877,
"learning_rate": 4.981849254735011e-05,
"loss": 0.572,
"step": 100
},
{
"epoch": 2.19,
"grad_norm": 1.0792688131332397,
"learning_rate": 4.9166418037568046e-05,
"loss": 0.4763,
"step": 105
},
{
"epoch": 2.29,
"grad_norm": 1.4945162534713745,
"learning_rate": 4.8514343527785974e-05,
"loss": 0.5616,
"step": 110
},
{
"epoch": 2.4,
"grad_norm": 1.323120355606079,
"learning_rate": 4.78622690180039e-05,
"loss": 0.5489,
"step": 115
},
{
"epoch": 2.5,
"grad_norm": 5.674253463745117,
"learning_rate": 4.721019450822183e-05,
"loss": 0.5401,
"step": 120
},
{
"epoch": 2.6,
"grad_norm": 4.727626323699951,
"learning_rate": 4.668853490039618e-05,
"loss": 0.4278,
"step": 125
},
{
"epoch": 2.71,
"grad_norm": 8.496376037597656,
"learning_rate": 4.6036460390614105e-05,
"loss": 0.6216,
"step": 130
},
{
"epoch": 2.81,
"grad_norm": 2.796064615249634,
"learning_rate": 4.538438588083204e-05,
"loss": 0.3347,
"step": 135
},
{
"epoch": 2.92,
"grad_norm": 7.401052951812744,
"learning_rate": 4.486272627300638e-05,
"loss": 0.3109,
"step": 140
},
{
"epoch": 3.0,
"eval_f1": 0.72,
"eval_loss": 0.8806066513061523,
"eval_runtime": 1.3788,
"eval_samples_per_second": 46.418,
"eval_steps_per_second": 5.802,
"step": 144
},
{
"epoch": 3.02,
"grad_norm": 3.1919455528259277,
"learning_rate": 4.421065176322431e-05,
"loss": 0.485,
"step": 145
},
{
"epoch": 3.12,
"grad_norm": 56.47049331665039,
"learning_rate": 4.355857725344224e-05,
"loss": 0.3607,
"step": 150
},
{
"epoch": 3.23,
"grad_norm": 4.917665481567383,
"learning_rate": 4.290650274366017e-05,
"loss": 0.2077,
"step": 155
},
{
"epoch": 3.33,
"grad_norm": 8.88060188293457,
"learning_rate": 4.238484313583451e-05,
"loss": 0.6496,
"step": 160
},
{
"epoch": 3.44,
"grad_norm": 1.9647566080093384,
"learning_rate": 4.1732768626052447e-05,
"loss": 0.1394,
"step": 165
},
{
"epoch": 3.54,
"grad_norm": 50.96482849121094,
"learning_rate": 4.1080694116270374e-05,
"loss": 0.2834,
"step": 170
},
{
"epoch": 3.65,
"grad_norm": 132.5663299560547,
"learning_rate": 4.042861960648831e-05,
"loss": 0.485,
"step": 175
},
{
"epoch": 3.75,
"grad_norm": 3.1624388694763184,
"learning_rate": 3.977654509670624e-05,
"loss": 0.2353,
"step": 180
},
{
"epoch": 3.85,
"grad_norm": 0.5671400427818298,
"learning_rate": 3.912447058692417e-05,
"loss": 0.3058,
"step": 185
},
{
"epoch": 3.96,
"grad_norm": 23.86330223083496,
"learning_rate": 3.84723960771421e-05,
"loss": 0.3764,
"step": 190
},
{
"epoch": 4.0,
"eval_f1": 0.7500000000000001,
"eval_loss": 1.1437296867370605,
"eval_runtime": 1.4019,
"eval_samples_per_second": 45.651,
"eval_steps_per_second": 5.706,
"step": 192
},
{
"epoch": 4.06,
"grad_norm": 0.443531334400177,
"learning_rate": 3.782032156736003e-05,
"loss": 0.2652,
"step": 195
},
{
"epoch": 4.17,
"grad_norm": 5.118373394012451,
"learning_rate": 3.716824705757796e-05,
"loss": 0.3585,
"step": 200
},
{
"epoch": 4.27,
"grad_norm": 0.7104140520095825,
"learning_rate": 3.651617254779589e-05,
"loss": 0.2533,
"step": 205
},
{
"epoch": 4.38,
"grad_norm": Infinity,
"learning_rate": 3.599451293997023e-05,
"loss": 0.1963,
"step": 210
},
{
"epoch": 4.48,
"grad_norm": 1.0030689239501953,
"learning_rate": 3.5342438430188165e-05,
"loss": 0.0602,
"step": 215
},
{
"epoch": 4.58,
"grad_norm": 193.33819580078125,
"learning_rate": 3.469036392040609e-05,
"loss": 0.2754,
"step": 220
},
{
"epoch": 4.69,
"grad_norm": 1.3048127889633179,
"learning_rate": 3.403828941062403e-05,
"loss": 0.1878,
"step": 225
},
{
"epoch": 4.79,
"grad_norm": 0.18795377016067505,
"learning_rate": 3.3386214900841956e-05,
"loss": 0.0245,
"step": 230
},
{
"epoch": 4.9,
"grad_norm": 0.7579631805419922,
"learning_rate": 3.2864555293016304e-05,
"loss": 0.3502,
"step": 235
},
{
"epoch": 5.0,
"grad_norm": 0.39707404375076294,
"learning_rate": 3.221248078323424e-05,
"loss": 0.2378,
"step": 240
},
{
"epoch": 5.0,
"eval_f1": 0.7865168539325843,
"eval_loss": 1.2380468845367432,
"eval_runtime": 1.3627,
"eval_samples_per_second": 46.967,
"eval_steps_per_second": 5.871,
"step": 240
}
],
"logging_steps": 5,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 3654362860415712.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"learning_rate": 5.6339237645170805e-05,
"per_device_train_batch_size": 4
}
}
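
The trainer state above is plain JSON, so it can be inspected directly. The following is a minimal sketch (not part of the checkpoint itself) that loads the file, prints the end-of-epoch eval_f1 / eval_loss records from log_history, and echoes the recorded best checkpoint. The filename trainer_state.json and its location are assumptions based on how the Trainer normally saves this state; adjust the path to wherever the checkpoint lives.

import json

# Assumed path; point this at the checkpoint directory that holds the state file.
with open("trainer_state.json") as f:
    # Python's json parser accepts the "Infinity" grad_norm values in log_history.
    state = json.load(f)

# End-of-epoch evaluation records are the log_history entries that carry "eval_f1".
evals = [rec for rec in state["log_history"] if "eval_f1" in rec]
for rec in evals:
    print(f"epoch {rec['epoch']:.1f}  step {rec['step']:>3}  "
          f"eval_f1 {rec['eval_f1']:.4f}  eval_loss {rec['eval_loss']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])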