|
{
  "best_metric": 0.23951096832752228,
  "best_model_checkpoint": "./text2sql/codellama_instruct_spider_e10/checkpoint-500",
  "epoch": 4.803695150115473,
  "eval_steps": 100,
  "global_step": 1300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 3.08641975308642e-07,
      "loss": 1.3897,
      "step": 1
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.54320987654321e-05,
      "loss": 1.3449,
      "step": 50
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.08641975308642e-05,
      "loss": 0.822,
      "step": 100
    },
    {
      "epoch": 0.37,
      "eval_loss": 0.5313276648521423,
      "eval_runtime": 34.4984,
      "eval_samples_per_second": 29.972,
      "eval_steps_per_second": 3.768,
      "step": 100
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.4342,
      "step": 150
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.9251379038613084e-05,
      "loss": 0.3014,
      "step": 200
    },
    {
      "epoch": 0.74,
      "eval_loss": 0.2763093411922455,
      "eval_runtime": 34.3952,
      "eval_samples_per_second": 30.062,
      "eval_steps_per_second": 3.78,
      "step": 200
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.826635145784082e-05,
      "loss": 0.2401,
      "step": 250
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.728132387706856e-05,
      "loss": 0.2091,
      "step": 300
    },
    {
      "epoch": 1.11,
      "eval_loss": 0.24685852229595184,
      "eval_runtime": 34.5214,
      "eval_samples_per_second": 29.952,
      "eval_steps_per_second": 3.766,
      "step": 300
    },
    {
      "epoch": 1.29,
      "learning_rate": 4.62962962962963e-05,
      "loss": 0.1837,
      "step": 350
    },
    {
      "epoch": 1.48,
      "learning_rate": 4.5311268715524035e-05,
      "loss": 0.1697,
      "step": 400
    },
    {
      "epoch": 1.48,
      "eval_loss": 0.2400880753993988,
      "eval_runtime": 34.3334,
      "eval_samples_per_second": 30.116,
      "eval_steps_per_second": 3.786,
      "step": 400
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.432624113475177e-05,
      "loss": 0.1589,
      "step": 450
    },
    {
      "epoch": 1.85,
      "learning_rate": 4.334121355397952e-05,
      "loss": 0.1495,
      "step": 500
    },
    {
      "epoch": 1.85,
      "eval_loss": 0.23951096832752228,
      "eval_runtime": 34.3515,
      "eval_samples_per_second": 30.101,
      "eval_steps_per_second": 3.784,
      "step": 500
    },
    {
      "epoch": 2.03,
      "learning_rate": 4.235618597320725e-05,
      "loss": 0.1356,
      "step": 550
    },
    {
      "epoch": 2.22,
      "learning_rate": 4.1371158392434986e-05,
      "loss": 0.1256,
      "step": 600
    },
    {
      "epoch": 2.22,
      "eval_loss": 0.2525351345539093,
      "eval_runtime": 34.4715,
      "eval_samples_per_second": 29.996,
      "eval_steps_per_second": 3.771,
      "step": 600
    },
    {
      "epoch": 2.4,
      "learning_rate": 4.0386130811662727e-05,
      "loss": 0.1162,
      "step": 650
    },
    {
      "epoch": 2.59,
      "learning_rate": 3.940110323089047e-05,
      "loss": 0.1097,
      "step": 700
    },
    {
      "epoch": 2.59,
      "eval_loss": 0.2641294002532959,
      "eval_runtime": 34.4325,
      "eval_samples_per_second": 30.03,
      "eval_steps_per_second": 3.776,
      "step": 700
    },
    {
      "epoch": 2.77,
      "learning_rate": 3.84160756501182e-05,
      "loss": 0.1059,
      "step": 750
    },
    {
      "epoch": 2.96,
      "learning_rate": 3.743104806934594e-05,
      "loss": 0.1107,
      "step": 800
    },
    {
      "epoch": 2.96,
      "eval_loss": 0.2617126703262329,
      "eval_runtime": 34.3286,
      "eval_samples_per_second": 30.121,
      "eval_steps_per_second": 3.787,
      "step": 800
    },
    {
      "epoch": 3.14,
      "learning_rate": 3.6446020488573684e-05,
      "loss": 0.0929,
      "step": 850
    },
    {
      "epoch": 3.33,
      "learning_rate": 3.546099290780142e-05,
      "loss": 0.0951,
      "step": 900
    },
    {
      "epoch": 3.33,
      "eval_loss": 0.26825329661369324,
      "eval_runtime": 34.3778,
      "eval_samples_per_second": 30.078,
      "eval_steps_per_second": 3.782,
      "step": 900
    },
    {
      "epoch": 3.51,
      "learning_rate": 3.447596532702916e-05,
      "loss": 0.0885,
      "step": 950
    },
    {
      "epoch": 3.7,
      "learning_rate": 3.349093774625689e-05,
      "loss": 0.0882,
      "step": 1000
    },
    {
      "epoch": 3.7,
      "eval_loss": 0.28923752903938293,
      "eval_runtime": 34.5013,
      "eval_samples_per_second": 29.97,
      "eval_steps_per_second": 3.768,
      "step": 1000
    },
    {
      "epoch": 3.88,
      "learning_rate": 3.2505910165484634e-05,
      "loss": 0.0861,
      "step": 1050
    },
    {
      "epoch": 4.06,
      "learning_rate": 3.1520882584712375e-05,
      "loss": 0.0818,
      "step": 1100
    },
    {
      "epoch": 4.06,
      "eval_loss": 0.31338369846343994,
      "eval_runtime": 34.3826,
      "eval_samples_per_second": 30.073,
      "eval_steps_per_second": 3.781,
      "step": 1100
    },
    {
      "epoch": 4.25,
      "learning_rate": 3.053585500394011e-05,
      "loss": 0.0714,
      "step": 1150
    },
    {
      "epoch": 4.43,
      "learning_rate": 2.9550827423167847e-05,
      "loss": 0.075,
      "step": 1200
    },
    {
      "epoch": 4.43,
      "eval_loss": 0.2977830767631531,
      "eval_runtime": 34.4574,
      "eval_samples_per_second": 30.008,
      "eval_steps_per_second": 3.773,
      "step": 1200
    },
    {
      "epoch": 4.62,
      "learning_rate": 2.8565799842395592e-05,
      "loss": 0.0725,
      "step": 1250
    },
    {
      "epoch": 4.8,
      "learning_rate": 2.758077226162333e-05,
      "loss": 0.0745,
      "step": 1300
    },
    {
      "epoch": 4.8,
      "eval_loss": 0.3094833493232727,
      "eval_runtime": 34.5265,
      "eval_samples_per_second": 29.948,
      "eval_steps_per_second": 3.765,
      "step": 1300
    }
  ],
  "logging_steps": 50,
  "max_steps": 2700,
  "num_train_epochs": 10,
  "save_steps": 100,
  "total_flos": 3.968781131460772e+17,
  "trial_name": null,
  "trial_params": null
}
|
|