{
  "best_metric": 0.3039023280143738,
  "best_model_checkpoint": "./kumpun-output/checkpoint-400",
  "epoch": 2.1164021164021163,
  "eval_steps": 200,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 5.246432304382324,
      "learning_rate": 7.000000000000001e-05,
      "loss": 4.0922,
      "step": 10
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.076571226119995,
      "learning_rate": 0.00017,
      "loss": 3.5074,
      "step": 20
    },
    {
      "epoch": 0.16,
      "grad_norm": 11.657795906066895,
      "learning_rate": 0.00026000000000000003,
      "loss": 2.2509,
      "step": 30
    },
    {
      "epoch": 0.21,
      "grad_norm": 3.2525696754455566,
      "learning_rate": 0.00034,
      "loss": 0.8157,
      "step": 40
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.755035400390625,
      "learning_rate": 0.00044,
      "loss": 0.5126,
      "step": 50
    },
    {
      "epoch": 0.32,
      "grad_norm": 2.1165263652801514,
      "learning_rate": 0.00054,
      "loss": 0.4988,
      "step": 60
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.805057168006897,
      "learning_rate": 0.00064,
      "loss": 0.2959,
      "step": 70
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.8083710074424744,
      "learning_rate": 0.00074,
      "loss": 0.2779,
      "step": 80
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.7004362344741821,
      "learning_rate": 0.00084,
      "loss": 0.244,
      "step": 90
    },
    {
      "epoch": 0.53,
      "grad_norm": 1.5553646087646484,
      "learning_rate": 0.00094,
      "loss": 0.3729,
      "step": 100
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.8424310684204102,
      "learning_rate": 0.0009914346895074947,
      "loss": 0.3408,
      "step": 110
    },
    {
      "epoch": 0.63,
      "grad_norm": 6.2793192863464355,
      "learning_rate": 0.0009700214132762313,
      "loss": 0.3011,
      "step": 120
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.8749362826347351,
      "learning_rate": 0.0009486081370449678,
      "loss": 0.249,
      "step": 130
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.6772909760475159,
      "learning_rate": 0.0009271948608137045,
      "loss": 0.2258,
      "step": 140
    },
    {
      "epoch": 0.79,
      "grad_norm": 1.4329707622528076,
      "learning_rate": 0.0009057815845824411,
      "loss": 0.2977,
      "step": 150
    },
    {
      "epoch": 0.85,
      "grad_norm": 1.501881718635559,
      "learning_rate": 0.0008843683083511778,
      "loss": 0.2276,
      "step": 160
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.2264193296432495,
      "learning_rate": 0.0008629550321199144,
      "loss": 0.2724,
      "step": 170
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.8799680471420288,
      "learning_rate": 0.0008415417558886511,
      "loss": 0.2358,
      "step": 180
    },
    {
      "epoch": 1.01,
      "grad_norm": 23.913034439086914,
      "learning_rate": 0.0008201284796573875,
      "loss": 0.4151,
      "step": 190
    },
    {
      "epoch": 1.06,
      "grad_norm": 2.3118550777435303,
      "learning_rate": 0.0007987152034261242,
      "loss": 0.4287,
      "step": 200
    },
    {
      "epoch": 1.06,
      "eval_loss": 0.3133811950683594,
      "eval_runtime": 0.6704,
      "eval_samples_per_second": 14.916,
      "eval_steps_per_second": 2.983,
      "step": 200
    },
    {
      "epoch": 1.11,
      "grad_norm": 0.6871071457862854,
      "learning_rate": 0.0007773019271948608,
      "loss": 0.2392,
      "step": 210
    },
    {
      "epoch": 1.16,
      "grad_norm": 0.5935308933258057,
      "learning_rate": 0.0007558886509635975,
      "loss": 0.225,
      "step": 220
    },
    {
      "epoch": 1.22,
      "grad_norm": 0.6274584531784058,
      "learning_rate": 0.0007344753747323341,
      "loss": 0.174,
      "step": 230
    },
    {
      "epoch": 1.27,
      "grad_norm": 1.8129804134368896,
      "learning_rate": 0.0007130620985010708,
      "loss": 0.2524,
      "step": 240
    },
    {
      "epoch": 1.32,
      "grad_norm": 1.9421052932739258,
      "learning_rate": 0.0006916488222698073,
      "loss": 0.2796,
      "step": 250
    },
    {
      "epoch": 1.38,
      "grad_norm": 1.4675747156143188,
      "learning_rate": 0.0006702355460385438,
      "loss": 0.25,
      "step": 260
    },
    {
      "epoch": 1.43,
      "grad_norm": 0.9965176582336426,
      "learning_rate": 0.0006488222698072805,
      "loss": 0.212,
      "step": 270
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.8673143982887268,
      "learning_rate": 0.0006274089935760171,
      "loss": 0.1898,
      "step": 280
    },
    {
      "epoch": 1.53,
      "grad_norm": 2.1284236907958984,
      "learning_rate": 0.0006059957173447538,
      "loss": 0.3798,
      "step": 290
    },
    {
      "epoch": 1.59,
      "grad_norm": 2.246885299682617,
      "learning_rate": 0.0005845824411134904,
      "loss": 0.2751,
      "step": 300
    },
    {
      "epoch": 1.64,
      "grad_norm": 1.626805305480957,
      "learning_rate": 0.000563169164882227,
      "loss": 0.2648,
      "step": 310
    },
    {
      "epoch": 1.69,
      "grad_norm": 1.273806095123291,
      "learning_rate": 0.0005417558886509636,
      "loss": 0.2163,
      "step": 320
    },
    {
      "epoch": 1.75,
      "grad_norm": 0.5863224267959595,
      "learning_rate": 0.0005203426124197003,
      "loss": 0.1584,
      "step": 330
    },
    {
      "epoch": 1.8,
      "grad_norm": 1.6265116930007935,
      "learning_rate": 0.0004989293361884369,
      "loss": 0.3904,
      "step": 340
    },
    {
      "epoch": 1.85,
      "grad_norm": 0.6448193192481995,
      "learning_rate": 0.00047751605995717344,
      "loss": 0.2429,
      "step": 350
    },
    {
      "epoch": 1.9,
      "grad_norm": 0.6662893295288086,
      "learning_rate": 0.0004561027837259101,
      "loss": 0.1899,
      "step": 360
    },
    {
      "epoch": 1.96,
      "grad_norm": 0.8907824158668518,
      "learning_rate": 0.0004346895074946467,
      "loss": 0.2035,
      "step": 370
    },
    {
      "epoch": 2.01,
      "grad_norm": 1.2576186656951904,
      "learning_rate": 0.0004132762312633833,
      "loss": 0.2224,
      "step": 380
    },
    {
      "epoch": 2.06,
      "grad_norm": 1.1375278234481812,
      "learning_rate": 0.00039186295503211994,
      "loss": 0.2645,
      "step": 390
    },
    {
      "epoch": 2.12,
      "grad_norm": 1.2520960569381714,
      "learning_rate": 0.0003704496788008566,
      "loss": 0.2,
      "step": 400
    },
    {
      "epoch": 2.12,
      "eval_loss": 0.3039023280143738,
      "eval_runtime": 0.6751,
      "eval_samples_per_second": 14.814,
      "eval_steps_per_second": 2.963,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 567,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 200,
  "total_flos": 2456831348441088.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}