{
"best_metric": 0.059659235179424286,
"best_model_checkpoint": "W:/res/Transformers/VIT-MFCC-Synthetic-Voice-Detection\\checkpoint-6346",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 9519,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"learning_rate": 4.7373673705221136e-05,
"loss": 0.2498,
"step": 500
},
{
"epoch": 0.32,
"learning_rate": 4.4747347410442276e-05,
"loss": 0.153,
"step": 1000
},
{
"epoch": 0.47,
"learning_rate": 4.212102111566341e-05,
"loss": 0.0707,
"step": 1500
},
{
"epoch": 0.63,
"learning_rate": 3.949469482088455e-05,
"loss": 0.0424,
"step": 2000
},
{
"epoch": 0.79,
"learning_rate": 3.686836852610568e-05,
"loss": 0.0532,
"step": 2500
},
{
"epoch": 0.95,
"learning_rate": 3.424204223132682e-05,
"loss": 0.0283,
"step": 3000
},
{
"epoch": 1.0,
"eval_accuracy": 0.9796731605216551,
"eval_f1": 0.9887974444863462,
"eval_loss": 0.09577164798974991,
"eval_precision": 0.9782293815564237,
"eval_recall": 0.9995963401506996,
"eval_runtime": 652.1425,
"eval_samples_per_second": 38.096,
"eval_steps_per_second": 4.763,
"step": 3173
},
{
"epoch": 1.1,
"learning_rate": 3.1615715936547956e-05,
"loss": 0.032,
"step": 3500
},
{
"epoch": 1.26,
"learning_rate": 2.8989389641769092e-05,
"loss": 0.0184,
"step": 4000
},
{
"epoch": 1.42,
"learning_rate": 2.636306334699023e-05,
"loss": 0.0268,
"step": 4500
},
{
"epoch": 1.58,
"learning_rate": 2.373673705221137e-05,
"loss": 0.0241,
"step": 5000
},
{
"epoch": 1.73,
"learning_rate": 2.1110410757432505e-05,
"loss": 0.0096,
"step": 5500
},
{
"epoch": 1.89,
"learning_rate": 1.8484084462653642e-05,
"loss": 0.0227,
"step": 6000
},
{
"epoch": 2.0,
"eval_accuracy": 0.9874416358074384,
"eval_f1": 0.9930316701657211,
"eval_loss": 0.059659235179424286,
"eval_precision": 0.989011477889492,
"eval_recall": 0.9970846788661644,
"eval_runtime": 654.1939,
"eval_samples_per_second": 37.977,
"eval_steps_per_second": 4.748,
"step": 6346
},
{
"epoch": 2.05,
"learning_rate": 1.585775816787478e-05,
"loss": 0.01,
"step": 6500
},
{
"epoch": 2.21,
"learning_rate": 1.3231431873095915e-05,
"loss": 0.0044,
"step": 7000
},
{
"epoch": 2.36,
"learning_rate": 1.060510557831705e-05,
"loss": 0.0005,
"step": 7500
},
{
"epoch": 2.52,
"learning_rate": 7.978779283538187e-06,
"loss": 0.0072,
"step": 8000
},
{
"epoch": 2.68,
"learning_rate": 5.352452988759324e-06,
"loss": 0.0013,
"step": 8500
},
{
"epoch": 2.84,
"learning_rate": 2.7261266939804603e-06,
"loss": 0.0026,
"step": 9000
},
{
"epoch": 2.99,
"learning_rate": 9.98003992015968e-08,
"loss": 0.0036,
"step": 9500
},
{
"epoch": 3.0,
"eval_accuracy": 0.9804379327000483,
"eval_f1": 0.9892177308426143,
"eval_loss": 0.12125047296285629,
"eval_precision": 0.9787514268153481,
"eval_recall": 0.9999102978112666,
"eval_runtime": 663.6055,
"eval_samples_per_second": 37.438,
"eval_steps_per_second": 4.68,
"step": 9519
},
{
"epoch": 3.0,
"step": 9519,
"total_flos": 5.900240089255035e+18,
"train_loss": 0.0399566938992414,
"train_runtime": 4074.0795,
"train_samples_per_second": 18.689,
"train_steps_per_second": 2.336
}
],
"logging_steps": 500,
"max_steps": 9519,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 5.900240089255035e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}