|
{
  "best_metric": 0.7333333333333333,
  "best_model_checkpoint": "spatial_vit_temporal_vit-finetuned-ucf101-subset/checkpoint-148",
  "epoch": 3.22972972972973,
  "eval_steps": 500,
  "global_step": 148,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 2.3018,
      "step": 10
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.81203007518797e-05,
      "loss": 2.2636,
      "step": 20
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.43609022556391e-05,
      "loss": 2.2165,
      "step": 30
    },
    {
      "epoch": 0.26,
      "eval_accuracy": 0.26666666666666666,
      "eval_loss": 2.0699431896209717,
      "eval_runtime": 107.0916,
      "eval_samples_per_second": 0.28,
      "eval_steps_per_second": 0.037,
      "step": 38
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.0601503759398494e-05,
      "loss": 2.1348,
      "step": 40
    },
    {
      "epoch": 1.08,
      "learning_rate": 3.6842105263157895e-05,
      "loss": 2.008,
      "step": 50
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.3082706766917295e-05,
      "loss": 1.9693,
      "step": 60
    },
    {
      "epoch": 1.22,
      "learning_rate": 2.9323308270676693e-05,
      "loss": 1.8229,
      "step": 70
    },
    {
      "epoch": 1.26,
      "eval_accuracy": 0.5,
      "eval_loss": 1.816007375717163,
      "eval_runtime": 108.4965,
      "eval_samples_per_second": 0.277,
      "eval_steps_per_second": 0.037,
      "step": 76
    },
    {
      "epoch": 2.03,
      "learning_rate": 2.556390977443609e-05,
      "loss": 1.8013,
      "step": 80
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.1804511278195487e-05,
      "loss": 1.6652,
      "step": 90
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.8045112781954888e-05,
      "loss": 1.6004,
      "step": 100
    },
    {
      "epoch": 2.23,
      "learning_rate": 1.4285714285714285e-05,
      "loss": 1.4707,
      "step": 110
    },
    {
      "epoch": 2.26,
      "eval_accuracy": 0.7,
      "eval_loss": 1.4157180786132812,
      "eval_runtime": 108.1494,
      "eval_samples_per_second": 0.277,
      "eval_steps_per_second": 0.037,
      "step": 114
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 1.4613,
      "step": 120
    },
    {
      "epoch": 3.11,
      "learning_rate": 6.766917293233083e-06,
      "loss": 1.3109,
      "step": 130
    },
    {
      "epoch": 3.18,
      "learning_rate": 3.007518796992481e-06,
      "loss": 1.3886,
      "step": 140
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.7333333333333333,
      "eval_loss": 1.3519634008407593,
      "eval_runtime": 108.7503,
      "eval_samples_per_second": 0.276,
      "eval_steps_per_second": 0.037,
      "step": 148
    },
    {
      "epoch": 3.23,
      "step": 148,
      "total_flos": 0.0,
      "train_loss": 1.7922898885366079,
      "train_runtime": 5180.8771,
      "train_samples_per_second": 0.229,
      "train_steps_per_second": 0.029
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.68,
      "eval_loss": 1.317222237586975,
      "eval_runtime": 274.8188,
      "eval_samples_per_second": 0.273,
      "eval_steps_per_second": 0.036,
      "step": 148
    },
    {
      "epoch": 3.23,
      "eval_accuracy": 0.68,
      "eval_loss": 1.317222237586975,
      "eval_runtime": 271.9717,
      "eval_samples_per_second": 0.276,
      "eval_steps_per_second": 0.037,
      "step": 148
    }
  ],
  "logging_steps": 10,
  "max_steps": 148,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|