{
  "best_metric": 0.3133811950683594,
  "best_model_checkpoint": "./kumpun-output/checkpoint-200",
  "epoch": 1.0582010582010581,
  "eval_steps": 200,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 5.246432304382324,
      "learning_rate": 7.000000000000001e-05,
      "loss": 4.0922,
      "step": 10
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.076571226119995,
      "learning_rate": 0.00017,
      "loss": 3.5074,
      "step": 20
    },
    {
      "epoch": 0.16,
      "grad_norm": 11.657795906066895,
      "learning_rate": 0.00026000000000000003,
      "loss": 2.2509,
      "step": 30
    },
    {
      "epoch": 0.21,
      "grad_norm": 3.2525696754455566,
      "learning_rate": 0.00034,
      "loss": 0.8157,
      "step": 40
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.755035400390625,
      "learning_rate": 0.00044,
      "loss": 0.5126,
      "step": 50
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.1165263652801514,
      "learning_rate": 0.00054,
      "loss": 0.4988,
      "step": 60
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.805057168006897,
      "learning_rate": 0.00064,
      "loss": 0.2959,
      "step": 70
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.8083710074424744,
      "learning_rate": 0.00074,
      "loss": 0.2779,
      "step": 80
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.7004362344741821,
      "learning_rate": 0.00084,
      "loss": 0.244,
      "step": 90
    },
    {
      "epoch": 0.53,
      "grad_norm": 1.5553646087646484,
      "learning_rate": 0.00094,
      "loss": 0.3729,
      "step": 100
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.8424310684204102,
      "learning_rate": 0.0009914346895074947,
      "loss": 0.3408,
      "step": 110
    },
    {
      "epoch": 0.63,
      "grad_norm": 6.2793192863464355,
      "learning_rate": 0.0009700214132762313,
      "loss": 0.3011,
      "step": 120
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.8749362826347351,
      "learning_rate": 0.0009486081370449678,
      "loss": 0.249,
      "step": 130
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.6772909760475159,
      "learning_rate": 0.0009271948608137045,
      "loss": 0.2258,
      "step": 140
    },
    {
      "epoch": 0.79,
      "grad_norm": 1.4329707622528076,
      "learning_rate": 0.0009057815845824411,
      "loss": 0.2977,
      "step": 150
    },
    {
      "epoch": 0.85,
      "grad_norm": 1.501881718635559,
      "learning_rate": 0.0008843683083511778,
      "loss": 0.2276,
      "step": 160
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.2264193296432495,
      "learning_rate": 0.0008629550321199144,
      "loss": 0.2724,
      "step": 170
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.8799680471420288,
      "learning_rate": 0.0008415417558886511,
      "loss": 0.2358,
      "step": 180
    },
    {
      "epoch": 1.01,
      "grad_norm": 23.913034439086914,
      "learning_rate": 0.0008201284796573875,
      "loss": 0.4151,
      "step": 190
    },
    {
      "epoch": 1.06,
      "grad_norm": 2.3118550777435303,
      "learning_rate": 0.0007987152034261242,
      "loss": 0.4287,
      "step": 200
    },
    {
      "epoch": 1.06,
      "eval_loss": 0.3133811950683594,
      "eval_runtime": 0.6704,
      "eval_samples_per_second": 14.916,
      "eval_steps_per_second": 2.983,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 567,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "total_flos": 1229703996899328.0,
  "save_steps": 200,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}