{
  "best_metric": 1.1286808252334595,
  "best_model_checkpoint": "./mistral/22-04-24-Weni-WeniGPT-Agents-Mistral-1.0.11-SFT_Experiment with SFT and a new tokenizer configuration for chat template of mistral-2_max_steps-330_batch_8_2024-04-22_ppid_9/checkpoint-90",
  "epoch": 1.6071428571428572,
  "eval_steps": 30,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 0.45132189989089966,
      "learning_rate": 0.00019999521087449523,
      "loss": 1.5157,
      "step": 10
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.25432512164115906,
      "learning_rate": 0.00019942107065112286,
      "loss": 1.1691,
      "step": 20
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 0.30843719840049744,
      "learning_rate": 0.0001978954027238763,
      "loss": 1.0466,
      "step": 30
    },
    {
      "epoch": 0.5357142857142857,
      "eval_loss": 1.1547143459320068,
      "eval_runtime": 36.25,
      "eval_samples_per_second": 1.269,
      "eval_steps_per_second": 0.331,
      "step": 30
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.35677382349967957,
      "learning_rate": 0.00019543280877920072,
      "loss": 1.0075,
      "step": 40
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 0.3124666213989258,
      "learning_rate": 0.00019205685752689177,
      "loss": 0.9662,
      "step": 50
    },
    {
      "epoch": 1.0714285714285714,
      "grad_norm": 0.3985981047153473,
      "learning_rate": 0.00018779985913140924,
      "loss": 0.8783,
      "step": 60
    },
    {
      "epoch": 1.0714285714285714,
      "eval_loss": 1.12884521484375,
      "eval_runtime": 36.2483,
      "eval_samples_per_second": 1.269,
      "eval_steps_per_second": 0.331,
      "step": 60
    },
    {
      "epoch": 1.25,
      "grad_norm": 0.40197721123695374,
      "learning_rate": 0.0001827025559814854,
      "loss": 0.7525,
      "step": 70
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.4119006395339966,
      "learning_rate": 0.0001768137327575751,
      "loss": 0.6832,
      "step": 80
    },
    {
      "epoch": 1.6071428571428572,
      "grad_norm": 0.3348449766635895,
      "learning_rate": 0.00017018974952906884,
      "loss": 0.6778,
      "step": 90
    },
    {
      "epoch": 1.6071428571428572,
      "eval_loss": 1.1286808252334595,
      "eval_runtime": 36.2515,
      "eval_samples_per_second": 1.269,
      "eval_steps_per_second": 0.331,
      "step": 90
    }
  ],
  "logging_steps": 10,
  "max_steps": 330,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 90,
  "total_flos": 8.560903941678694e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}