{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1958863858961802,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.019588638589618023,
      "grad_norm": 0.15741384499943922,
      "learning_rate": 9e-05,
      "loss": 1.2379,
      "step": 10
    },
    {
      "epoch": 0.039177277179236046,
      "grad_norm": 0.09727684228244954,
      "learning_rate": 8e-05,
      "loss": 0.6525,
      "step": 20
    },
    {
      "epoch": 0.058765915768854066,
      "grad_norm": 0.048326423015033655,
      "learning_rate": 7e-05,
      "loss": 0.5573,
      "step": 30
    },
    {
      "epoch": 0.07835455435847209,
      "grad_norm": 0.019410146833111586,
      "learning_rate": 6e-05,
      "loss": 0.537,
      "step": 40
    },
    {
      "epoch": 0.0979431929480901,
      "grad_norm": 0.022042703238575993,
      "learning_rate": 5e-05,
      "loss": 0.5104,
      "step": 50
    },
    {
      "epoch": 0.11753183153770813,
      "grad_norm": 0.028125711976748517,
      "learning_rate": 4e-05,
      "loss": 0.5151,
      "step": 60
    },
    {
      "epoch": 0.13712047012732614,
      "grad_norm": 0.018545984749469877,
      "learning_rate": 3e-05,
      "loss": 0.5003,
      "step": 70
    },
    {
      "epoch": 0.15670910871694418,
      "grad_norm": 0.01788486630093878,
      "learning_rate": 2e-05,
      "loss": 0.5009,
      "step": 80
    },
    {
      "epoch": 0.1762977473065622,
      "grad_norm": 0.021150819386911185,
      "learning_rate": 1e-05,
      "loss": 0.5027,
      "step": 90
    },
    {
      "epoch": 0.1958863858961802,
      "grad_norm": 0.019105309450920132,
      "learning_rate": 0.0,
      "loss": 0.4927,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "total_flos": 50865518936064.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}