{
  "best_metric": 0.23951096832752228,
  "best_model_checkpoint": "./text2sql/codellama_instruct_spider_e10/checkpoint-500",
  "epoch": 2.956120092378753,
  "eval_steps": 100,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 3.08641975308642e-07,
      "loss": 1.3897,
      "step": 1
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.54320987654321e-05,
      "loss": 1.3449,
      "step": 50
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.08641975308642e-05,
      "loss": 0.822,
      "step": 100
    },
    {
      "epoch": 0.37,
      "eval_loss": 0.5313276648521423,
      "eval_runtime": 34.4984,
      "eval_samples_per_second": 29.972,
      "eval_steps_per_second": 3.768,
      "step": 100
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.4342,
      "step": 150
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.9251379038613084e-05,
      "loss": 0.3014,
      "step": 200
    },
    {
      "epoch": 0.74,
      "eval_loss": 0.2763093411922455,
      "eval_runtime": 34.3952,
      "eval_samples_per_second": 30.062,
      "eval_steps_per_second": 3.78,
      "step": 200
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.826635145784082e-05,
      "loss": 0.2401,
      "step": 250
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.728132387706856e-05,
      "loss": 0.2091,
      "step": 300
    },
    {
      "epoch": 1.11,
      "eval_loss": 0.24685852229595184,
      "eval_runtime": 34.5214,
      "eval_samples_per_second": 29.952,
      "eval_steps_per_second": 3.766,
      "step": 300
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.1837,
      "step": 350
    },
    {
      "epoch": 1.48,
      "learning_rate": 4.5311268715524035e-05,
      "loss": 0.1697,
      "step": 400
    },
    {
      "epoch": 1.48,
      "eval_loss": 0.2400880753993988,
      "eval_runtime": 34.3334,
      "eval_samples_per_second": 30.116,
      "eval_steps_per_second": 3.786,
      "step": 400
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.432624113475177e-05,
      "loss": 0.1589,
      "step": 450
    },
    {
      "epoch": 1.85,
      "learning_rate": 4.334121355397952e-05,
      "loss": 0.1495,
      "step": 500
    },
    {
      "epoch": 1.85,
      "eval_loss": 0.23951096832752228,
      "eval_runtime": 34.3515,
      "eval_samples_per_second": 30.101,
      "eval_steps_per_second": 3.784,
      "step": 500
    },
    {
      "epoch": 2.03,
      "learning_rate": 4.235618597320725e-05,
      "loss": 0.1356,
      "step": 550
    },
    {
      "epoch": 2.22,
      "learning_rate": 4.1371158392434986e-05,
      "loss": 0.1256,
      "step": 600
    },
    {
      "epoch": 2.22,
      "eval_loss": 0.2525351345539093,
      "eval_runtime": 34.4715,
      "eval_samples_per_second": 29.996,
      "eval_steps_per_second": 3.771,
      "step": 600
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.0386130811662727e-05,
      "loss": 0.1162,
      "step": 650
    },
    {
      "epoch": 2.59,
      "learning_rate": 3.940110323089047e-05,
      "loss": 0.1097,
      "step": 700
    },
    {
      "epoch": 2.59,
      "eval_loss": 0.2641294002532959,
      "eval_runtime": 34.4325,
      "eval_samples_per_second": 30.03,
      "eval_steps_per_second": 3.776,
      "step": 700
    },
    {
      "epoch": 2.77,
      "learning_rate": 3.84160756501182e-05,
      "loss": 0.1059,
      "step": 750
    },
    {
      "epoch": 2.96,
      "learning_rate": 3.743104806934594e-05,
      "loss": 0.1107,
      "step": 800
    },
    {
      "epoch": 2.96,
      "eval_loss": 0.2617126703262329,
      "eval_runtime": 34.3286,
      "eval_samples_per_second": 30.121,
      "eval_steps_per_second": 3.787,
      "step": 800
    }
  ],
  "logging_steps": 50,
  "max_steps": 2700,
  "num_train_epochs": 10,
  "save_steps": 100,
  "total_flos": 2.4444004623876096e+17,
  "trial_name": null,
  "trial_params": null
}
|