{
  "best_metric": 0.7326732673267327,
  "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-5/checkpoint-24",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 120,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.21,
      "grad_norm": 1.6560618877410889,
      "learning_rate": 9.928194518007172e-07,
      "loss": 0.6997,
      "step": 5
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.6409028768539429,
      "learning_rate": 1.9856389036014343e-06,
      "loss": 0.6971,
      "step": 10
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.6359347105026245,
      "learning_rate": 2.978458355402151e-06,
      "loss": 0.6953,
      "step": 15
    },
    {
      "epoch": 0.83,
      "grad_norm": 1.56245756149292,
      "learning_rate": 3.971277807202869e-06,
      "loss": 0.6889,
      "step": 20
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6865768432617188,
      "eval_runtime": 1.3652,
      "eval_samples_per_second": 46.879,
      "eval_steps_per_second": 5.86,
      "step": 24
    },
    {
      "epoch": 1.04,
      "grad_norm": 0.6976048946380615,
      "learning_rate": 4.743470714158982e-06,
      "loss": 0.6865,
      "step": 25
    },
    {
      "epoch": 1.25,
      "grad_norm": 2.008615732192993,
      "learning_rate": 4.63315744173668e-06,
      "loss": 0.6681,
      "step": 30
    },
    {
      "epoch": 1.46,
      "grad_norm": 1.9885936975479126,
      "learning_rate": 4.522844169314378e-06,
      "loss": 0.6531,
      "step": 35
    },
    {
      "epoch": 1.67,
      "grad_norm": 0.5047381520271301,
      "learning_rate": 4.412530896892076e-06,
      "loss": 0.6664,
      "step": 40
    },
    {
      "epoch": 1.88,
      "grad_norm": 0.9776627421379089,
      "learning_rate": 4.302217624469774e-06,
      "loss": 0.6545,
      "step": 45
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.680206298828125,
      "eval_runtime": 1.3708,
      "eval_samples_per_second": 46.689,
      "eval_steps_per_second": 5.836,
      "step": 48
    },
    {
      "epoch": 2.08,
      "grad_norm": 0.46269264817237854,
      "learning_rate": 4.191904352047472e-06,
      "loss": 0.6518,
      "step": 50
    },
    {
      "epoch": 2.29,
      "grad_norm": 0.8560582995414734,
      "learning_rate": 4.081591079625171e-06,
      "loss": 0.6333,
      "step": 55
    },
    {
      "epoch": 2.5,
      "grad_norm": 0.9135796427726746,
      "learning_rate": 3.971277807202869e-06,
      "loss": 0.6494,
      "step": 60
    },
    {
      "epoch": 2.71,
      "grad_norm": 0.71152263879776,
      "learning_rate": 3.8609645347805665e-06,
      "loss": 0.6444,
      "step": 65
    },
    {
      "epoch": 2.92,
      "grad_norm": 1.3949508666992188,
      "learning_rate": 3.750651262358265e-06,
      "loss": 0.5726,
      "step": 70
    },
    {
      "epoch": 3.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6824684143066406,
      "eval_runtime": 1.373,
      "eval_samples_per_second": 46.614,
      "eval_steps_per_second": 5.827,
      "step": 72
    },
    {
      "epoch": 3.12,
      "grad_norm": 0.7415557503700256,
      "learning_rate": 3.6403379899359627e-06,
      "loss": 0.6271,
      "step": 75
    },
    {
      "epoch": 3.33,
      "grad_norm": 0.6959993243217468,
      "learning_rate": 3.5300247175136605e-06,
      "loss": 0.6316,
      "step": 80
    },
    {
      "epoch": 3.54,
      "grad_norm": 1.2839587926864624,
      "learning_rate": 3.419711445091359e-06,
      "loss": 0.5607,
      "step": 85
    },
    {
      "epoch": 3.75,
      "grad_norm": 1.2485283613204956,
      "learning_rate": 3.309398172669057e-06,
      "loss": 0.6403,
      "step": 90
    },
    {
      "epoch": 3.96,
      "grad_norm": 1.1759371757507324,
      "learning_rate": 3.199084900246755e-06,
      "loss": 0.6242,
      "step": 95
    },
    {
      "epoch": 4.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6892623901367188,
      "eval_runtime": 1.3822,
      "eval_samples_per_second": 46.302,
      "eval_steps_per_second": 5.788,
      "step": 96
    },
    {
      "epoch": 4.17,
      "grad_norm": 0.6085842251777649,
      "learning_rate": 3.0887716278244532e-06,
      "loss": 0.564,
      "step": 100
    },
    {
      "epoch": 4.38,
      "grad_norm": 0.9830076098442078,
      "learning_rate": 2.978458355402151e-06,
      "loss": 0.641,
      "step": 105
    },
    {
      "epoch": 4.58,
      "grad_norm": 0.6557525992393494,
      "learning_rate": 2.8681450829798494e-06,
      "loss": 0.5943,
      "step": 110
    },
    {
      "epoch": 4.79,
      "grad_norm": 0.7436282634735107,
      "learning_rate": 2.7578318105575477e-06,
      "loss": 0.614,
      "step": 115
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.7114256620407104,
      "learning_rate": 2.6475185381352455e-06,
      "loss": 0.6081,
      "step": 120
    },
    {
      "epoch": 5.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6939811706542969,
      "eval_runtime": 1.3699,
      "eval_samples_per_second": 46.719,
      "eval_steps_per_second": 5.84,
      "step": 120
    }
  ],
  "logging_steps": 5,
  "max_steps": 240,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 4193401989215328.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 4.765533368643442e-06,
    "per_device_train_batch_size": 8
  }
}