hw1/run-3/checkpoint-6414/trainer_state.json
{
  "best_metric": 0.5015847043424765,
  "best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-3/checkpoint-6414",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 6414,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.23386342376052385,
      "grad_norm": 9.916498184204102,
      "learning_rate": 1.0352460846585169e-05,
      "loss": 0.588,
      "step": 500
    },
    {
      "epoch": 0.4677268475210477,
      "grad_norm": 2.0442557334899902,
      "learning_rate": 9.709610570468354e-06,
      "loss": 0.5642,
      "step": 1000
    },
    {
      "epoch": 0.7015902712815716,
      "grad_norm": 25.158737182617188,
      "learning_rate": 9.066760294351542e-06,
      "loss": 0.5358,
      "step": 1500
    },
    {
      "epoch": 0.9354536950420954,
      "grad_norm": 21.2142276763916,
      "learning_rate": 8.423910018234727e-06,
      "loss": 0.5288,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6297594904899597,
      "eval_matthews_correlation": 0.4181278600904661,
      "eval_runtime": 0.8372,
      "eval_samples_per_second": 1245.804,
      "eval_steps_per_second": 78.833,
      "step": 2138
    },
    {
      "epoch": 1.1693171188026192,
      "grad_norm": 40.90157699584961,
      "learning_rate": 7.781059742117914e-06,
      "loss": 0.4866,
      "step": 2500
    },
    {
      "epoch": 1.4031805425631432,
      "grad_norm": 0.11679931730031967,
      "learning_rate": 7.138209466001099e-06,
      "loss": 0.4601,
      "step": 3000
    },
    {
      "epoch": 1.637043966323667,
      "grad_norm": 9.670472145080566,
      "learning_rate": 6.495359189884285e-06,
      "loss": 0.4758,
      "step": 3500
    },
    {
      "epoch": 1.8709073900841908,
      "grad_norm": 1.609247088432312,
      "learning_rate": 5.852508913767472e-06,
      "loss": 0.4517,
      "step": 4000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.9118176102638245,
      "eval_matthews_correlation": 0.4622674649892012,
      "eval_runtime": 0.7476,
      "eval_samples_per_second": 1395.206,
      "eval_steps_per_second": 88.287,
      "step": 4276
    },
    {
      "epoch": 2.1047708138447145,
      "grad_norm": 0.1278693974018097,
      "learning_rate": 5.209658637650659e-06,
      "loss": 0.3716,
      "step": 4500
    },
    {
      "epoch": 2.3386342376052385,
      "grad_norm": 0.046489011496305466,
      "learning_rate": 4.566808361533844e-06,
      "loss": 0.3199,
      "step": 5000
    },
    {
      "epoch": 2.5724976613657624,
      "grad_norm": 8.200950622558594,
      "learning_rate": 3.923958085417031e-06,
      "loss": 0.3667,
      "step": 5500
    },
    {
      "epoch": 2.8063610851262863,
      "grad_norm": 0.06558237969875336,
      "learning_rate": 3.281107809300217e-06,
      "loss": 0.3323,
      "step": 6000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.9285444021224976,
      "eval_matthews_correlation": 0.5015847043424765,
      "eval_runtime": 0.7214,
      "eval_samples_per_second": 1445.788,
      "eval_steps_per_second": 91.488,
      "step": 6414
    }
  ],
  "logging_steps": 500,
  "max_steps": 8552,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 97881247513548.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.0995311122701982e-05,
    "num_train_epochs": 4,
    "per_device_train_batch_size": 4,
    "seed": 21
  }
}
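
For reference, a minimal Python sketch for inspecting a trainer_state.json like the one above: it prints the best metric recorded for this hyperparameter-search trial and the end-of-epoch evaluation entries stored in "log_history". The STATE_PATH value is an assumption; point it at a local copy of this checkpoint.

import json

# Assumed local path to this checkpoint's trainer state file.
STATE_PATH = "run-3/checkpoint-6414/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

print(f"best_metric (eval_matthews_correlation): {state['best_metric']:.4f}")
print(f"best_model_checkpoint: {state['best_model_checkpoint']}")

# Entries with an "eval_loss" key are end-of-epoch evaluations; the others are
# periodic training-loss logs written every "logging_steps" (500) steps.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(
            f"epoch {entry['epoch']:.0f}: "
            f"eval_loss={entry['eval_loss']:.4f}, "
            f"MCC={entry['eval_matthews_correlation']:.4f}"
        )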