{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.3,
"eval_steps": 2000,
"global_step": 6000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.9697519540786743,
"learning_rate": 1e-06,
"loss": 0.1629,
"step": 100
},
{
"epoch": 0.01,
"grad_norm": 1.3848124742507935,
"learning_rate": 9.898989898989898e-07,
"loss": 0.14,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 0.9986572265625,
"learning_rate": 9.797979797979797e-07,
"loss": 0.1354,
"step": 300
},
{
"epoch": 0.02,
"grad_norm": 0.11438798904418945,
"learning_rate": 9.696969696969698e-07,
"loss": 0.1182,
"step": 400
},
{
"epoch": 0.03,
"grad_norm": 0.8548241257667542,
"learning_rate": 9.595959595959596e-07,
"loss": 0.1192,
"step": 500
},
{
"epoch": 0.03,
"grad_norm": 1.5312464237213135,
"learning_rate": 9.494949494949495e-07,
"loss": 0.0997,
"step": 600
},
{
"epoch": 0.04,
"grad_norm": 0.9692059755325317,
"learning_rate": 9.393939393939395e-07,
"loss": 0.102,
"step": 700
},
{
"epoch": 0.04,
"grad_norm": 0.42864611744880676,
"learning_rate": 9.292929292929292e-07,
"loss": 0.0901,
"step": 800
},
{
"epoch": 0.04,
"grad_norm": 0.852543830871582,
"learning_rate": 9.191919191919192e-07,
"loss": 0.0937,
"step": 900
},
{
"epoch": 0.05,
"grad_norm": 0.5718303322792053,
"learning_rate": 9.09090909090909e-07,
"loss": 0.093,
"step": 1000
},
{
"epoch": 0.06,
"grad_norm": 0.9396565556526184,
"learning_rate": 8.98989898989899e-07,
"loss": 0.0892,
"step": 1100
},
{
"epoch": 0.06,
"grad_norm": 0.08157779276371002,
"learning_rate": 8.888888888888888e-07,
"loss": 0.0934,
"step": 1200
},
{
"epoch": 0.07,
"grad_norm": 0.8076322078704834,
"learning_rate": 8.787878787878787e-07,
"loss": 0.0725,
"step": 1300
},
{
"epoch": 0.07,
"grad_norm": 1.5076119899749756,
"learning_rate": 8.686868686868687e-07,
"loss": 0.0835,
"step": 1400
},
{
"epoch": 0.07,
"grad_norm": 1.1567238569259644,
"learning_rate": 8.585858585858586e-07,
"loss": 0.0747,
"step": 1500
},
{
"epoch": 0.08,
"grad_norm": 0.6817927956581116,
"learning_rate": 8.484848484848484e-07,
"loss": 0.0903,
"step": 1600
},
{
"epoch": 0.09,
"grad_norm": 0.6467050313949585,
"learning_rate": 8.383838383838383e-07,
"loss": 0.0721,
"step": 1700
},
{
"epoch": 0.09,
"grad_norm": 1.8435570001602173,
"learning_rate": 8.282828282828283e-07,
"loss": 0.0847,
"step": 1800
},
{
"epoch": 0.1,
"grad_norm": 0.6265794634819031,
"learning_rate": 8.181818181818182e-07,
"loss": 0.0687,
"step": 1900
},
{
"epoch": 0.1,
"grad_norm": 1.360060453414917,
"learning_rate": 8.08080808080808e-07,
"loss": 0.0748,
"step": 2000
},
{
"epoch": 0.1,
"eval_loss": 0.06745574623346329,
"eval_runtime": 304.7718,
"eval_samples_per_second": 3.281,
"eval_steps_per_second": 0.82,
"step": 2000
},
{
"epoch": 0.1,
"grad_norm": 0.5074820518493652,
"learning_rate": 7.97979797979798e-07,
"loss": 0.0699,
"step": 2100
},
{
"epoch": 0.11,
"grad_norm": 1.3634223937988281,
"learning_rate": 7.878787878787878e-07,
"loss": 0.0682,
"step": 2200
},
{
"epoch": 0.12,
"grad_norm": 3.0763113498687744,
"learning_rate": 7.777777777777778e-07,
"loss": 0.0786,
"step": 2300
},
{
"epoch": 0.12,
"grad_norm": 1.5437021255493164,
"learning_rate": 7.676767676767675e-07,
"loss": 0.065,
"step": 2400
},
{
"epoch": 0.12,
"grad_norm": 1.5890172719955444,
"learning_rate": 7.575757575757575e-07,
"loss": 0.0772,
"step": 2500
},
{
"epoch": 0.13,
"grad_norm": 0.9904383420944214,
"learning_rate": 7.474747474747475e-07,
"loss": 0.0735,
"step": 2600
},
{
"epoch": 0.14,
"grad_norm": 0.9233341813087463,
"learning_rate": 7.373737373737373e-07,
"loss": 0.0695,
"step": 2700
},
{
"epoch": 0.14,
"grad_norm": 0.10603007674217224,
"learning_rate": 7.272727272727272e-07,
"loss": 0.0595,
"step": 2800
},
{
"epoch": 0.14,
"grad_norm": 0.9929779767990112,
"learning_rate": 7.171717171717171e-07,
"loss": 0.0595,
"step": 2900
},
{
"epoch": 0.15,
"grad_norm": 0.4711184799671173,
"learning_rate": 7.07070707070707e-07,
"loss": 0.0606,
"step": 3000
},
{
"epoch": 0.15,
"grad_norm": 0.5627967715263367,
"learning_rate": 6.96969696969697e-07,
"loss": 0.0713,
"step": 3100
},
{
"epoch": 0.16,
"grad_norm": 0.7645086646080017,
"learning_rate": 6.868686868686868e-07,
"loss": 0.0672,
"step": 3200
},
{
"epoch": 0.17,
"grad_norm": 0.8071433901786804,
"learning_rate": 6.767676767676767e-07,
"loss": 0.0809,
"step": 3300
},
{
"epoch": 0.17,
"grad_norm": 1.281545877456665,
"learning_rate": 6.666666666666666e-07,
"loss": 0.0577,
"step": 3400
},
{
"epoch": 0.17,
"grad_norm": 1.15431547164917,
"learning_rate": 6.565656565656566e-07,
"loss": 0.0613,
"step": 3500
},
{
"epoch": 0.18,
"grad_norm": 0.42522451281547546,
"learning_rate": 6.464646464646465e-07,
"loss": 0.0531,
"step": 3600
},
{
"epoch": 0.18,
"grad_norm": 1.2078278064727783,
"learning_rate": 6.363636363636363e-07,
"loss": 0.0692,
"step": 3700
},
{
"epoch": 0.19,
"grad_norm": 2.207512855529785,
"learning_rate": 6.262626262626263e-07,
"loss": 0.0508,
"step": 3800
},
{
"epoch": 0.2,
"grad_norm": 1.696768045425415,
"learning_rate": 6.161616161616161e-07,
"loss": 0.0655,
"step": 3900
},
{
"epoch": 0.2,
"grad_norm": 0.6295761466026306,
"learning_rate": 6.060606060606061e-07,
"loss": 0.0577,
"step": 4000
},
{
"epoch": 0.2,
"eval_loss": 0.057661667466163635,
"eval_runtime": 304.4519,
"eval_samples_per_second": 3.285,
"eval_steps_per_second": 0.821,
"step": 4000
},
{
"epoch": 0.2,
"grad_norm": 1.7527456283569336,
"learning_rate": 5.959595959595959e-07,
"loss": 0.0575,
"step": 4100
},
{
"epoch": 0.21,
"grad_norm": 2.4716615676879883,
"learning_rate": 5.858585858585858e-07,
"loss": 0.0768,
"step": 4200
},
{
"epoch": 0.21,
"grad_norm": 0.5344067215919495,
"learning_rate": 5.757575757575758e-07,
"loss": 0.0571,
"step": 4300
},
{
"epoch": 0.22,
"grad_norm": 0.9244055151939392,
"learning_rate": 5.656565656565657e-07,
"loss": 0.0563,
"step": 4400
},
{
"epoch": 0.23,
"grad_norm": 1.16805100440979,
"learning_rate": 5.555555555555555e-07,
"loss": 0.0629,
"step": 4500
},
{
"epoch": 0.23,
"grad_norm": 1.6229370832443237,
"learning_rate": 5.454545454545454e-07,
"loss": 0.0641,
"step": 4600
},
{
"epoch": 0.23,
"grad_norm": 0.9978136420249939,
"learning_rate": 5.353535353535354e-07,
"loss": 0.0642,
"step": 4700
},
{
"epoch": 0.24,
"grad_norm": 0.4647519886493683,
"learning_rate": 5.252525252525253e-07,
"loss": 0.0661,
"step": 4800
},
{
"epoch": 0.24,
"grad_norm": 1.3375582695007324,
"learning_rate": 5.152525252525253e-07,
"loss": 0.0595,
"step": 4900
},
{
"epoch": 0.25,
"grad_norm": 0.08847852051258087,
"learning_rate": 5.051515151515151e-07,
"loss": 0.0461,
"step": 5000
},
{
"epoch": 0.26,
"grad_norm": 1.0537497997283936,
"learning_rate": 4.95050505050505e-07,
"loss": 0.0494,
"step": 5100
},
{
"epoch": 0.26,
"grad_norm": 0.9269624948501587,
"learning_rate": 4.849494949494949e-07,
"loss": 0.0714,
"step": 5200
},
{
"epoch": 0.27,
"grad_norm": 0.7937563061714172,
"learning_rate": 4.748484848484848e-07,
"loss": 0.065,
"step": 5300
},
{
"epoch": 0.27,
"grad_norm": 1.4173548221588135,
"learning_rate": 4.6474747474747473e-07,
"loss": 0.0496,
"step": 5400
},
{
"epoch": 0.28,
"grad_norm": 0.37847477197647095,
"learning_rate": 4.546464646464646e-07,
"loss": 0.0651,
"step": 5500
},
{
"epoch": 0.28,
"grad_norm": 0.7063392400741577,
"learning_rate": 4.445454545454545e-07,
"loss": 0.0562,
"step": 5600
},
{
"epoch": 0.28,
"grad_norm": 0.6490029096603394,
"learning_rate": 4.344444444444444e-07,
"loss": 0.0695,
"step": 5700
},
{
"epoch": 0.29,
"grad_norm": 0.8921324610710144,
"learning_rate": 4.2434343434343434e-07,
"loss": 0.0733,
"step": 5800
},
{
"epoch": 0.29,
"grad_norm": 0.5432973504066467,
"learning_rate": 4.142424242424242e-07,
"loss": 0.0589,
"step": 5900
},
{
"epoch": 0.3,
"grad_norm": 0.017520183697342873,
"learning_rate": 4.041414141414141e-07,
"loss": 0.0541,
"step": 6000
},
{
"epoch": 0.3,
"eval_loss": 0.06119654327630997,
"eval_runtime": 305.9355,
"eval_samples_per_second": 3.269,
"eval_steps_per_second": 0.817,
"step": 6000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 4.89616507404288e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}