{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9421364985163203,
"eval_steps": 42,
"global_step": 672,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.0981353223323822,
"learning_rate": 2e-05,
"loss": 0.641,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 0.6416735053062439,
"eval_runtime": 21.4326,
"eval_samples_per_second": 46.331,
"eval_steps_per_second": 11.618,
"step": 1
},
{
"epoch": 0.01,
"grad_norm": 0.09748291969299316,
"learning_rate": 4e-05,
"loss": 0.6396,
"step": 2
},
{
"epoch": 0.02,
"grad_norm": 0.09947647899389267,
"learning_rate": 6e-05,
"loss": 0.6397,
"step": 3
},
{
"epoch": 0.02,
"grad_norm": 0.09976381808519363,
"learning_rate": 8e-05,
"loss": 0.6371,
"step": 4
},
{
"epoch": 0.03,
"grad_norm": 0.10493721067905426,
"learning_rate": 0.0001,
"loss": 0.6491,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.1144007071852684,
"learning_rate": 0.00012,
"loss": 0.6218,
"step": 6
},
{
"epoch": 0.04,
"grad_norm": 0.08536222577095032,
"learning_rate": 0.00014,
"loss": 0.6177,
"step": 7
},
{
"epoch": 0.05,
"grad_norm": 0.11926598846912384,
"learning_rate": 0.00016,
"loss": 0.5861,
"step": 8
},
{
"epoch": 0.05,
"grad_norm": 0.15648387372493744,
"learning_rate": 0.00018,
"loss": 0.6006,
"step": 9
},
{
"epoch": 0.06,
"grad_norm": 0.12172720581293106,
"learning_rate": 0.0002,
"loss": 0.5845,
"step": 10
},
{
"epoch": 0.07,
"grad_norm": 0.09348208457231522,
"learning_rate": 0.0001999988739622358,
"loss": 0.5471,
"step": 11
},
{
"epoch": 0.07,
"grad_norm": 0.07471276819705963,
"learning_rate": 0.00019999549587430254,
"loss": 0.578,
"step": 12
},
{
"epoch": 0.08,
"grad_norm": 0.07200929522514343,
"learning_rate": 0.00019998986581227718,
"loss": 0.5328,
"step": 13
},
{
"epoch": 0.08,
"grad_norm": 0.07460763305425644,
"learning_rate": 0.000199981983902953,
"loss": 0.5651,
"step": 14
},
{
"epoch": 0.09,
"grad_norm": 0.07441641390323639,
"learning_rate": 0.00019997185032383664,
"loss": 0.5589,
"step": 15
},
{
"epoch": 0.09,
"grad_norm": 0.07513019442558289,
"learning_rate": 0.00019995946530314385,
"loss": 0.5736,
"step": 16
},
{
"epoch": 0.1,
"grad_norm": 0.06902395933866501,
"learning_rate": 0.00019994482911979468,
"loss": 0.556,
"step": 17
},
{
"epoch": 0.11,
"grad_norm": 0.07314619421958923,
"learning_rate": 0.00019992794210340706,
"loss": 0.5469,
"step": 18
},
{
"epoch": 0.11,
"grad_norm": 0.06833848357200623,
"learning_rate": 0.00019990880463428937,
"loss": 0.5448,
"step": 19
},
{
"epoch": 0.12,
"grad_norm": 0.07301248610019684,
"learning_rate": 0.00019988741714343177,
"loss": 0.5612,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 0.07063400000333786,
"learning_rate": 0.0001998637801124968,
"loss": 0.5446,
"step": 21
},
{
"epoch": 0.13,
"grad_norm": 0.06935883313417435,
"learning_rate": 0.00019983789407380828,
"loss": 0.5223,
"step": 22
},
{
"epoch": 0.14,
"grad_norm": 0.06576420366764069,
"learning_rate": 0.00019980975961033924,
"loss": 0.5351,
"step": 23
},
{
"epoch": 0.14,
"grad_norm": 0.07276671379804611,
"learning_rate": 0.00019977937735569915,
"loss": 0.5423,
"step": 24
},
{
"epoch": 0.15,
"grad_norm": 0.0756976306438446,
"learning_rate": 0.00019974674799411925,
"loss": 0.5344,
"step": 25
},
{
"epoch": 0.15,
"grad_norm": 0.06945928931236267,
"learning_rate": 0.00019971187226043745,
"loss": 0.5198,
"step": 26
},
{
"epoch": 0.16,
"grad_norm": 0.06587155908346176,
"learning_rate": 0.0001996747509400816,
"loss": 0.5175,
"step": 27
},
{
"epoch": 0.17,
"grad_norm": 0.0752682164311409,
"learning_rate": 0.0001996353848690519,
"loss": 0.5068,
"step": 28
},
{
"epoch": 0.17,
"grad_norm": 0.0740601122379303,
"learning_rate": 0.00019959377493390196,
"loss": 0.535,
"step": 29
},
{
"epoch": 0.18,
"grad_norm": 0.07076304405927658,
"learning_rate": 0.00019954992207171898,
"loss": 0.5079,
"step": 30
},
{
"epoch": 0.18,
"grad_norm": 0.0776033028960228,
"learning_rate": 0.00019950382727010254,
"loss": 0.5124,
"step": 31
},
{
"epoch": 0.19,
"grad_norm": 0.0779872015118599,
"learning_rate": 0.00019945549156714234,
"loss": 0.5146,
"step": 32
},
{
"epoch": 0.2,
"grad_norm": 0.08037945628166199,
"learning_rate": 0.00019940491605139498,
"loss": 0.5189,
"step": 33
},
{
"epoch": 0.2,
"grad_norm": 0.06880298256874084,
"learning_rate": 0.0001993521018618592,
"loss": 0.506,
"step": 34
},
{
"epoch": 0.21,
"grad_norm": 0.0755767747759819,
"learning_rate": 0.00019929705018795053,
"loss": 0.4997,
"step": 35
},
{
"epoch": 0.21,
"grad_norm": 0.07505559921264648,
"learning_rate": 0.00019923976226947417,
"loss": 0.502,
"step": 36
},
{
"epoch": 0.22,
"grad_norm": 0.07533205300569534,
"learning_rate": 0.00019918023939659733,
"loss": 0.5093,
"step": 37
},
{
"epoch": 0.23,
"grad_norm": 0.0748637244105339,
"learning_rate": 0.0001991184829098201,
"loss": 0.4976,
"step": 38
},
{
"epoch": 0.23,
"grad_norm": 0.076931431889534,
"learning_rate": 0.00019905449419994518,
"loss": 0.4992,
"step": 39
},
{
"epoch": 0.24,
"grad_norm": 0.07511387020349503,
"learning_rate": 0.0001989882747080466,
"loss": 0.5069,
"step": 40
},
{
"epoch": 0.24,
"grad_norm": 0.0723625123500824,
"learning_rate": 0.00019891982592543746,
"loss": 0.4952,
"step": 41
},
{
"epoch": 0.25,
"grad_norm": 0.07320375740528107,
"learning_rate": 0.00019884914939363588,
"loss": 0.5093,
"step": 42
},
{
"epoch": 0.25,
"eval_loss": 0.5259941220283508,
"eval_runtime": 21.4684,
"eval_samples_per_second": 46.254,
"eval_steps_per_second": 11.598,
"step": 42
},
{
"epoch": 0.26,
"grad_norm": 0.07251272350549698,
"learning_rate": 0.00019877624670433086,
"loss": 0.4931,
"step": 43
},
{
"epoch": 0.26,
"grad_norm": 0.07731667906045914,
"learning_rate": 0.00019870111949934599,
"loss": 0.4879,
"step": 44
},
{
"epoch": 0.27,
"grad_norm": 0.074358269572258,
"learning_rate": 0.00019862376947060264,
"loss": 0.5049,
"step": 45
},
{
"epoch": 0.27,
"grad_norm": 0.0808371901512146,
"learning_rate": 0.0001985441983600819,
"loss": 0.517,
"step": 46
},
{
"epoch": 0.28,
"grad_norm": 0.07559769600629807,
"learning_rate": 0.00019846240795978528,
"loss": 0.4834,
"step": 47
},
{
"epoch": 0.28,
"grad_norm": 0.07425505667924881,
"learning_rate": 0.00019837840011169438,
"loss": 0.5138,
"step": 48
},
{
"epoch": 0.29,
"grad_norm": 0.07782939821481705,
"learning_rate": 0.00019829217670772935,
"loss": 0.4858,
"step": 49
},
{
"epoch": 0.3,
"grad_norm": 0.0754002034664154,
"learning_rate": 0.00019820373968970642,
"loss": 0.4941,
"step": 50
},
{
"epoch": 0.3,
"grad_norm": 0.07364428788423538,
"learning_rate": 0.000198113091049294,
"loss": 0.4835,
"step": 51
},
{
"epoch": 0.31,
"grad_norm": 0.08309967815876007,
"learning_rate": 0.00019802023282796796,
"loss": 0.5237,
"step": 52
},
{
"epoch": 0.31,
"grad_norm": 0.07548778504133224,
"learning_rate": 0.00019792516711696556,
"loss": 0.4923,
"step": 53
},
{
"epoch": 0.32,
"grad_norm": 0.07607278972864151,
"learning_rate": 0.0001978278960572384,
"loss": 0.4971,
"step": 54
},
{
"epoch": 0.33,
"grad_norm": 0.07432844489812851,
"learning_rate": 0.00019772842183940422,
"loss": 0.4874,
"step": 55
},
{
"epoch": 0.33,
"grad_norm": 0.077260322868824,
"learning_rate": 0.00019762674670369755,
"loss": 0.5067,
"step": 56
},
{
"epoch": 0.34,
"grad_norm": 0.08594146370887756,
"learning_rate": 0.00019752287293991927,
"loss": 0.4804,
"step": 57
},
{
"epoch": 0.34,
"grad_norm": 0.075816310942173,
"learning_rate": 0.00019741680288738492,
"loss": 0.4738,
"step": 58
},
{
"epoch": 0.35,
"grad_norm": 0.07784326374530792,
"learning_rate": 0.00019730853893487228,
"loss": 0.4768,
"step": 59
},
{
"epoch": 0.36,
"grad_norm": 0.08903329074382782,
"learning_rate": 0.00019719808352056724,
"loss": 0.4773,
"step": 60
},
{
"epoch": 0.36,
"grad_norm": 0.07911587506532669,
"learning_rate": 0.00019708543913200924,
"loss": 0.4672,
"step": 61
},
{
"epoch": 0.37,
"grad_norm": 0.07881385087966919,
"learning_rate": 0.00019697060830603494,
"loss": 0.4824,
"step": 62
},
{
"epoch": 0.37,
"grad_norm": 0.08292945474386215,
"learning_rate": 0.00019685359362872125,
"loss": 0.4814,
"step": 63
},
{
"epoch": 0.38,
"grad_norm": 0.08237861096858978,
"learning_rate": 0.00019673439773532713,
"loss": 0.486,
"step": 64
},
{
"epoch": 0.39,
"grad_norm": 0.07958442717790604,
"learning_rate": 0.0001966130233102341,
"loss": 0.4913,
"step": 65
},
{
"epoch": 0.39,
"grad_norm": 0.07969169318675995,
"learning_rate": 0.00019648947308688593,
"loss": 0.4781,
"step": 66
},
{
"epoch": 0.4,
"grad_norm": 0.08310563862323761,
"learning_rate": 0.00019636374984772692,
"loss": 0.4811,
"step": 67
},
{
"epoch": 0.4,
"grad_norm": 0.07763976603746414,
"learning_rate": 0.00019623585642413938,
"loss": 0.4809,
"step": 68
},
{
"epoch": 0.41,
"grad_norm": 0.0927213802933693,
"learning_rate": 0.00019610579569637982,
"loss": 0.5019,
"step": 69
},
{
"epoch": 0.42,
"grad_norm": 0.08405344933271408,
"learning_rate": 0.000195973570593514,
"loss": 0.5001,
"step": 70
},
{
"epoch": 0.42,
"grad_norm": 0.07862479984760284,
"learning_rate": 0.0001958391840933512,
"loss": 0.4894,
"step": 71
},
{
"epoch": 0.43,
"grad_norm": 0.07815296947956085,
"learning_rate": 0.00019570263922237687,
"loss": 0.4676,
"step": 72
},
{
"epoch": 0.43,
"grad_norm": 0.07999672740697861,
"learning_rate": 0.00019556393905568458,
"loss": 0.4857,
"step": 73
},
{
"epoch": 0.44,
"grad_norm": 0.08266247063875198,
"learning_rate": 0.0001954230867169069,
"loss": 0.4842,
"step": 74
},
{
"epoch": 0.45,
"grad_norm": 0.08117777854204178,
"learning_rate": 0.00019528008537814486,
"loss": 0.4602,
"step": 75
},
{
"epoch": 0.45,
"grad_norm": 0.08203484117984772,
"learning_rate": 0.00019513493825989664,
"loss": 0.4761,
"step": 76
},
{
"epoch": 0.46,
"grad_norm": 0.07647153735160828,
"learning_rate": 0.00019498764863098495,
"loss": 0.4839,
"step": 77
},
{
"epoch": 0.46,
"grad_norm": 0.0811714455485344,
"learning_rate": 0.00019483821980848347,
"loss": 0.4803,
"step": 78
},
{
"epoch": 0.47,
"grad_norm": 0.08266978710889816,
"learning_rate": 0.00019468665515764215,
"loss": 0.4665,
"step": 79
},
{
"epoch": 0.47,
"grad_norm": 0.07869689911603928,
"learning_rate": 0.00019453295809181143,
"loss": 0.4857,
"step": 80
},
{
"epoch": 0.48,
"grad_norm": 0.08934654295444489,
"learning_rate": 0.00019437713207236525,
"loss": 0.4825,
"step": 81
},
{
"epoch": 0.49,
"grad_norm": 0.07842836529016495,
"learning_rate": 0.00019421918060862333,
"loss": 0.4609,
"step": 82
},
{
"epoch": 0.49,
"grad_norm": 0.08244986832141876,
"learning_rate": 0.0001940591072577719,
"loss": 0.4688,
"step": 83
},
{
"epoch": 0.5,
"grad_norm": 0.07819854468107224,
"learning_rate": 0.00019389691562478374,
"loss": 0.4665,
"step": 84
},
{
"epoch": 0.5,
"eval_loss": 0.5117939114570618,
"eval_runtime": 21.4742,
"eval_samples_per_second": 46.242,
"eval_steps_per_second": 11.595,
"step": 84
},
{
"epoch": 0.5,
"grad_norm": 0.0837428942322731,
"learning_rate": 0.0001937326093623369,
"loss": 0.4952,
"step": 85
},
{
"epoch": 0.51,
"grad_norm": 0.07781701534986496,
"learning_rate": 0.00019356619217073253,
"loss": 0.467,
"step": 86
},
{
"epoch": 0.52,
"grad_norm": 0.08447270840406418,
"learning_rate": 0.00019339766779781145,
"loss": 0.4838,
"step": 87
},
{
"epoch": 0.52,
"grad_norm": 0.08231997489929199,
"learning_rate": 0.00019322704003886987,
"loss": 0.4611,
"step": 88
},
{
"epoch": 0.53,
"grad_norm": 0.08507382869720459,
"learning_rate": 0.00019305431273657374,
"loss": 0.4757,
"step": 89
},
{
"epoch": 0.53,
"grad_norm": 0.08521989732980728,
"learning_rate": 0.0001928794897808724,
"loss": 0.4854,
"step": 90
},
{
"epoch": 0.54,
"grad_norm": 0.0963786169886589,
"learning_rate": 0.00019270257510891082,
"loss": 0.4505,
"step": 91
},
{
"epoch": 0.55,
"grad_norm": 0.08671442419290543,
"learning_rate": 0.0001925235727049411,
"loss": 0.4766,
"step": 92
},
{
"epoch": 0.55,
"grad_norm": 0.09087081998586655,
"learning_rate": 0.0001923424866002325,
"loss": 0.4966,
"step": 93
},
{
"epoch": 0.56,
"grad_norm": 0.07899381965398788,
"learning_rate": 0.00019215932087298092,
"loss": 0.4638,
"step": 94
},
{
"epoch": 0.56,
"grad_norm": 0.09070860594511032,
"learning_rate": 0.00019197407964821684,
"loss": 0.4847,
"step": 95
},
{
"epoch": 0.57,
"grad_norm": 0.0885949656367302,
"learning_rate": 0.00019178676709771258,
"loss": 0.4648,
"step": 96
},
{
"epoch": 0.58,
"grad_norm": 0.09253839403390884,
"learning_rate": 0.00019159738743988825,
"loss": 0.459,
"step": 97
},
{
"epoch": 0.58,
"grad_norm": 0.08571318536996841,
"learning_rate": 0.00019140594493971674,
"loss": 0.4797,
"step": 98
},
{
"epoch": 0.59,
"grad_norm": 0.07787954807281494,
"learning_rate": 0.0001912124439086278,
"loss": 0.4547,
"step": 99
},
{
"epoch": 0.59,
"grad_norm": 0.08822935819625854,
"learning_rate": 0.00019101688870441078,
"loss": 0.4511,
"step": 100
},
{
"epoch": 0.6,
"grad_norm": 0.08409956842660904,
"learning_rate": 0.0001908192837311166,
"loss": 0.4631,
"step": 101
},
{
"epoch": 0.61,
"grad_norm": 0.08279416710138321,
"learning_rate": 0.00019061963343895846,
"loss": 0.4696,
"step": 102
},
{
"epoch": 0.61,
"grad_norm": 0.09696204960346222,
"learning_rate": 0.00019041794232421176,
"loss": 0.4862,
"step": 103
},
{
"epoch": 0.62,
"grad_norm": 0.08494329452514648,
"learning_rate": 0.00019021421492911272,
"loss": 0.4557,
"step": 104
},
{
"epoch": 0.62,
"grad_norm": 0.08702557533979416,
"learning_rate": 0.00019000845584175616,
"loss": 0.4693,
"step": 105
},
{
"epoch": 0.63,
"grad_norm": 0.09048158675432205,
"learning_rate": 0.00018980066969599216,
"loss": 0.4714,
"step": 106
},
{
"epoch": 0.64,
"grad_norm": 0.08462114632129669,
"learning_rate": 0.0001895908611713216,
"loss": 0.4632,
"step": 107
},
{
"epoch": 0.64,
"grad_norm": 0.09956546127796173,
"learning_rate": 0.00018937903499279102,
"loss": 0.4638,
"step": 108
},
{
"epoch": 0.65,
"grad_norm": 0.08630617707967758,
"learning_rate": 0.00018916519593088584,
"loss": 0.4499,
"step": 109
},
{
"epoch": 0.65,
"grad_norm": 0.08207620680332184,
"learning_rate": 0.0001889493488014233,
"loss": 0.4603,
"step": 110
},
{
"epoch": 0.66,
"grad_norm": 0.08473565429449081,
"learning_rate": 0.00018873149846544376,
"loss": 0.4571,
"step": 111
},
{
"epoch": 0.66,
"grad_norm": 0.08818928152322769,
"learning_rate": 0.00018851164982910135,
"loss": 0.4489,
"step": 112
},
{
"epoch": 0.67,
"grad_norm": 0.08116699010133743,
"learning_rate": 0.00018828980784355338,
"loss": 0.4578,
"step": 113
},
{
"epoch": 0.68,
"grad_norm": 0.08832226693630219,
"learning_rate": 0.00018806597750484897,
"loss": 0.4719,
"step": 114
},
{
"epoch": 0.68,
"grad_norm": 0.08624406903982162,
"learning_rate": 0.0001878401638538163,
"loss": 0.4628,
"step": 115
},
{
"epoch": 0.69,
"grad_norm": 0.08936543017625809,
"learning_rate": 0.00018761237197594945,
"loss": 0.4533,
"step": 116
},
{
"epoch": 0.69,
"grad_norm": 0.08579661697149277,
"learning_rate": 0.00018738260700129354,
"loss": 0.4772,
"step": 117
},
{
"epoch": 0.7,
"grad_norm": 0.08271288126707077,
"learning_rate": 0.0001871508741043293,
"loss": 0.4773,
"step": 118
},
{
"epoch": 0.71,
"grad_norm": 0.08224964886903763,
"learning_rate": 0.0001869171785038566,
"loss": 0.4635,
"step": 119
},
{
"epoch": 0.71,
"grad_norm": 0.087012380361557,
"learning_rate": 0.00018668152546287686,
"loss": 0.4559,
"step": 120
},
{
"epoch": 0.72,
"grad_norm": 0.08352699875831604,
"learning_rate": 0.00018644392028847458,
"loss": 0.4485,
"step": 121
},
{
"epoch": 0.72,
"grad_norm": 0.08281444013118744,
"learning_rate": 0.00018620436833169772,
"loss": 0.4393,
"step": 122
},
{
"epoch": 0.73,
"grad_norm": 0.08376545459032059,
"learning_rate": 0.00018596287498743732,
"loss": 0.4525,
"step": 123
},
{
"epoch": 0.74,
"grad_norm": 0.08526434749364853,
"learning_rate": 0.0001857194456943058,
"loss": 0.4456,
"step": 124
},
{
"epoch": 0.74,
"grad_norm": 0.08151934295892715,
"learning_rate": 0.0001854740859345148,
"loss": 0.4576,
"step": 125
},
{
"epoch": 0.75,
"grad_norm": 0.08793777972459793,
"learning_rate": 0.0001852268012337514,
"loss": 0.4431,
"step": 126
},
{
"epoch": 0.75,
"eval_loss": 0.5042669773101807,
"eval_runtime": 21.4592,
"eval_samples_per_second": 46.274,
"eval_steps_per_second": 11.603,
"step": 126
},
{
"epoch": 0.75,
"grad_norm": 0.08135095983743668,
"learning_rate": 0.00018497759716105377,
"loss": 0.4384,
"step": 127
},
{
"epoch": 0.76,
"grad_norm": 0.0917576476931572,
"learning_rate": 0.0001847264793286859,
"loss": 0.4687,
"step": 128
},
{
"epoch": 0.77,
"grad_norm": 0.08832691609859467,
"learning_rate": 0.00018447345339201102,
"loss": 0.4386,
"step": 129
},
{
"epoch": 0.77,
"grad_norm": 0.08340886980295181,
"learning_rate": 0.00018421852504936438,
"loss": 0.4512,
"step": 130
},
{
"epoch": 0.78,
"grad_norm": 0.08589499443769455,
"learning_rate": 0.00018396170004192475,
"loss": 0.4387,
"step": 131
},
{
"epoch": 0.78,
"grad_norm": 0.08753557503223419,
"learning_rate": 0.00018370298415358526,
"loss": 0.4615,
"step": 132
},
{
"epoch": 0.79,
"grad_norm": 0.08406232297420502,
"learning_rate": 0.00018344238321082315,
"loss": 0.4465,
"step": 133
},
{
"epoch": 0.8,
"grad_norm": 0.08514856547117233,
"learning_rate": 0.0001831799030825685,
"loss": 0.4516,
"step": 134
},
{
"epoch": 0.8,
"grad_norm": 0.09259331226348877,
"learning_rate": 0.000182915549680072,
"loss": 0.4387,
"step": 135
},
{
"epoch": 0.81,
"grad_norm": 0.08862275630235672,
"learning_rate": 0.00018264932895677193,
"loss": 0.4434,
"step": 136
},
{
"epoch": 0.81,
"grad_norm": 0.08515379577875137,
"learning_rate": 0.0001823812469081601,
"loss": 0.4425,
"step": 137
},
{
"epoch": 0.82,
"grad_norm": 0.09041007608175278,
"learning_rate": 0.00018211130957164668,
"loss": 0.4607,
"step": 138
},
{
"epoch": 0.82,
"grad_norm": 0.08312032371759415,
"learning_rate": 0.0001818395230264244,
"loss": 0.442,
"step": 139
},
{
"epoch": 0.83,
"grad_norm": 0.08981412649154663,
"learning_rate": 0.00018156589339333152,
"loss": 0.4608,
"step": 140
},
{
"epoch": 0.84,
"grad_norm": 0.08991118520498276,
"learning_rate": 0.00018129042683471402,
"loss": 0.451,
"step": 141
},
{
"epoch": 0.84,
"grad_norm": 0.08628728240728378,
"learning_rate": 0.00018101312955428692,
"loss": 0.4453,
"step": 142
},
{
"epoch": 0.85,
"grad_norm": 0.08730859309434891,
"learning_rate": 0.00018073400779699435,
"loss": 0.4485,
"step": 143
},
{
"epoch": 0.85,
"grad_norm": 0.08489865809679031,
"learning_rate": 0.0001804530678488691,
"loss": 0.4592,
"step": 144
},
{
"epoch": 0.86,
"grad_norm": 0.08439410477876663,
"learning_rate": 0.00018017031603689102,
"loss": 0.4326,
"step": 145
},
{
"epoch": 0.87,
"grad_norm": 0.09346488118171692,
"learning_rate": 0.0001798857587288445,
"loss": 0.4484,
"step": 146
},
{
"epoch": 0.87,
"grad_norm": 0.09130821377038956,
"learning_rate": 0.00017959940233317498,
"loss": 0.4502,
"step": 147
},
{
"epoch": 0.88,
"grad_norm": 0.08846256881952286,
"learning_rate": 0.0001793112532988448,
"loss": 0.4322,
"step": 148
},
{
"epoch": 0.88,
"grad_norm": 0.09061886370182037,
"learning_rate": 0.00017902131811518786,
"loss": 0.4437,
"step": 149
},
{
"epoch": 0.89,
"grad_norm": 0.09259927272796631,
"learning_rate": 0.00017872960331176345,
"loss": 0.4545,
"step": 150
},
{
"epoch": 0.9,
"grad_norm": 0.09632189571857452,
"learning_rate": 0.00017843611545820926,
"loss": 0.4515,
"step": 151
},
{
"epoch": 0.9,
"grad_norm": 0.08714065700769424,
"learning_rate": 0.00017814086116409348,
"loss": 0.4602,
"step": 152
},
{
"epoch": 0.91,
"grad_norm": 0.09537078440189362,
"learning_rate": 0.00017784384707876576,
"loss": 0.4482,
"step": 153
},
{
"epoch": 0.91,
"grad_norm": 0.09175322949886322,
"learning_rate": 0.00017754507989120764,
"loss": 0.4681,
"step": 154
},
{
"epoch": 0.92,
"grad_norm": 0.08962789177894592,
"learning_rate": 0.00017724456632988187,
"loss": 0.4304,
"step": 155
},
{
"epoch": 0.93,
"grad_norm": 0.09643880277872086,
"learning_rate": 0.00017694231316258077,
"loss": 0.4532,
"step": 156
},
{
"epoch": 0.93,
"grad_norm": 0.08335065096616745,
"learning_rate": 0.00017663832719627402,
"loss": 0.4504,
"step": 157
},
{
"epoch": 0.94,
"grad_norm": 0.087184838950634,
"learning_rate": 0.0001763326152769551,
"loss": 0.4752,
"step": 158
},
{
"epoch": 0.94,
"grad_norm": 0.08858635276556015,
"learning_rate": 0.0001760251842894874,
"loss": 0.4413,
"step": 159
},
{
"epoch": 0.95,
"grad_norm": 0.08101391792297363,
"learning_rate": 0.00017571604115744892,
"loss": 0.4465,
"step": 160
},
{
"epoch": 0.96,
"grad_norm": 0.08623132854700089,
"learning_rate": 0.0001754051928429765,
"loss": 0.4673,
"step": 161
},
{
"epoch": 0.96,
"grad_norm": 0.0922100692987442,
"learning_rate": 0.00017509264634660895,
"loss": 0.4587,
"step": 162
},
{
"epoch": 0.97,
"grad_norm": 0.08243449032306671,
"learning_rate": 0.00017477840870712945,
"loss": 0.4368,
"step": 163
},
{
"epoch": 0.97,
"grad_norm": 0.0845554992556572,
"learning_rate": 0.00017446248700140693,
"loss": 0.4209,
"step": 164
},
{
"epoch": 0.98,
"grad_norm": 0.08277452737092972,
"learning_rate": 0.00017414488834423687,
"loss": 0.4397,
"step": 165
},
{
"epoch": 0.99,
"grad_norm": 0.0826331302523613,
"learning_rate": 0.00017382561988818086,
"loss": 0.4333,
"step": 166
},
{
"epoch": 0.99,
"grad_norm": 0.08441821485757828,
"learning_rate": 0.0001735046888234057,
"loss": 0.4496,
"step": 167
},
{
"epoch": 1.0,
"grad_norm": 0.08665426075458527,
"learning_rate": 0.00017318210237752136,
"loss": 0.4523,
"step": 168
},
{
"epoch": 1.0,
"eval_loss": 0.4984985589981079,
"eval_runtime": 21.4662,
"eval_samples_per_second": 46.259,
"eval_steps_per_second": 11.6,
"step": 168
},
{
"epoch": 1.0,
"grad_norm": 0.08923573791980743,
"learning_rate": 0.00017285786781541824,
"loss": 0.4735,
"step": 169
},
{
"epoch": 1.01,
"grad_norm": 0.08524107187986374,
"learning_rate": 0.00017253199243910357,
"loss": 0.4323,
"step": 170
},
{
"epoch": 1.01,
"grad_norm": 0.09072479605674744,
"learning_rate": 0.00017220448358753692,
"loss": 0.4617,
"step": 171
},
{
"epoch": 1.01,
"grad_norm": 0.08986588567495346,
"learning_rate": 0.0001718753486364651,
"loss": 0.4264,
"step": 172
},
{
"epoch": 1.01,
"grad_norm": 0.09549989551305771,
"learning_rate": 0.00017154459499825564,
"loss": 0.4042,
"step": 173
},
{
"epoch": 1.02,
"grad_norm": 0.09217043220996857,
"learning_rate": 0.0001712122301217304,
"loss": 0.4028,
"step": 174
},
{
"epoch": 1.02,
"grad_norm": 0.10525793582201004,
"learning_rate": 0.00017087826149199734,
"loss": 0.4176,
"step": 175
},
{
"epoch": 1.03,
"grad_norm": 0.10329723358154297,
"learning_rate": 0.00017054269663028233,
"loss": 0.4062,
"step": 176
},
{
"epoch": 1.04,
"grad_norm": 0.10040964931249619,
"learning_rate": 0.00017020554309375946,
"loss": 0.4222,
"step": 177
},
{
"epoch": 1.04,
"grad_norm": 0.10112589597702026,
"learning_rate": 0.00016986680847538106,
"loss": 0.4058,
"step": 178
},
{
"epoch": 1.05,
"grad_norm": 0.09155958890914917,
"learning_rate": 0.0001695265004037065,
"loss": 0.4045,
"step": 179
},
{
"epoch": 1.05,
"grad_norm": 0.10278405249118805,
"learning_rate": 0.00016918462654273063,
"loss": 0.4294,
"step": 180
},
{
"epoch": 1.06,
"grad_norm": 0.10295873880386353,
"learning_rate": 0.00016884119459171105,
"loss": 0.4025,
"step": 181
},
{
"epoch": 1.07,
"grad_norm": 0.09759877622127533,
"learning_rate": 0.0001684962122849946,
"loss": 0.4227,
"step": 182
},
{
"epoch": 1.07,
"grad_norm": 0.09612429887056351,
"learning_rate": 0.00016814968739184343,
"loss": 0.3991,
"step": 183
},
{
"epoch": 1.08,
"grad_norm": 0.09644313901662827,
"learning_rate": 0.00016780162771625986,
"loss": 0.4271,
"step": 184
},
{
"epoch": 1.08,
"grad_norm": 0.09357411414384842,
"learning_rate": 0.00016745204109681064,
"loss": 0.4017,
"step": 185
},
{
"epoch": 1.09,
"grad_norm": 0.09177077561616898,
"learning_rate": 0.00016710093540645056,
"loss": 0.386,
"step": 186
},
{
"epoch": 1.09,
"grad_norm": 0.10068117827177048,
"learning_rate": 0.00016674831855234486,
"loss": 0.4127,
"step": 187
},
{
"epoch": 1.1,
"grad_norm": 0.09634707123041153,
"learning_rate": 0.00016639419847569147,
"loss": 0.403,
"step": 188
},
{
"epoch": 1.11,
"grad_norm": 0.09533964842557907,
"learning_rate": 0.00016603858315154195,
"loss": 0.4004,
"step": 189
},
{
"epoch": 1.11,
"grad_norm": 0.09449424594640732,
"learning_rate": 0.00016568148058862197,
"loss": 0.4136,
"step": 190
},
{
"epoch": 1.12,
"grad_norm": 0.09432344883680344,
"learning_rate": 0.00016532289882915103,
"loss": 0.403,
"step": 191
},
{
"epoch": 1.12,
"grad_norm": 0.10496091097593307,
"learning_rate": 0.00016496284594866113,
"loss": 0.423,
"step": 192
},
{
"epoch": 1.13,
"grad_norm": 0.0957692563533783,
"learning_rate": 0.00016460133005581512,
"loss": 0.4032,
"step": 193
},
{
"epoch": 1.14,
"grad_norm": 0.10207226127386093,
"learning_rate": 0.0001642383592922239,
"loss": 0.4068,
"step": 194
},
{
"epoch": 1.14,
"grad_norm": 0.10685818642377853,
"learning_rate": 0.00016387394183226328,
"loss": 0.4194,
"step": 195
},
{
"epoch": 1.15,
"grad_norm": 0.09566125273704529,
"learning_rate": 0.00016350808588288965,
"loss": 0.3717,
"step": 196
},
{
"epoch": 1.15,
"grad_norm": 0.10426433384418488,
"learning_rate": 0.0001631407996834553,
"loss": 0.4197,
"step": 197
},
{
"epoch": 1.16,
"grad_norm": 0.09626364707946777,
"learning_rate": 0.00016277209150552285,
"loss": 0.3965,
"step": 198
},
{
"epoch": 1.17,
"grad_norm": 0.10416010022163391,
"learning_rate": 0.000162401969652679,
"loss": 0.4042,
"step": 199
},
{
"epoch": 1.17,
"grad_norm": 0.11229964345693588,
"learning_rate": 0.0001620304424603474,
"loss": 0.4039,
"step": 200
},
{
"epoch": 1.18,
"grad_norm": 0.09705965220928192,
"learning_rate": 0.00016165751829560102,
"loss": 0.4111,
"step": 201
},
{
"epoch": 1.18,
"grad_norm": 0.09751327335834503,
"learning_rate": 0.00016128320555697364,
"loss": 0.4007,
"step": 202
},
{
"epoch": 1.19,
"grad_norm": 0.09729477018117905,
"learning_rate": 0.000160907512674271,
"loss": 0.4025,
"step": 203
},
{
"epoch": 1.2,
"grad_norm": 0.09977483749389648,
"learning_rate": 0.00016053044810838046,
"loss": 0.4143,
"step": 204
},
{
"epoch": 1.2,
"grad_norm": 0.10283766686916351,
"learning_rate": 0.0001601520203510809,
"loss": 0.4093,
"step": 205
},
{
"epoch": 1.21,
"grad_norm": 0.10398785024881363,
"learning_rate": 0.00015977223792485118,
"loss": 0.409,
"step": 206
},
{
"epoch": 1.21,
"grad_norm": 0.10487958043813705,
"learning_rate": 0.0001593911093826784,
"loss": 0.4102,
"step": 207
},
{
"epoch": 1.22,
"grad_norm": 0.10372807830572128,
"learning_rate": 0.00015900864330786518,
"loss": 0.4082,
"step": 208
},
{
"epoch": 1.23,
"grad_norm": 0.09674819558858871,
"learning_rate": 0.00015862484831383644,
"loss": 0.4093,
"step": 209
},
{
"epoch": 1.23,
"grad_norm": 0.09968870133161545,
"learning_rate": 0.00015823973304394525,
"loss": 0.4237,
"step": 210
},
{
"epoch": 1.23,
"eval_loss": 0.4985295832157135,
"eval_runtime": 21.4597,
"eval_samples_per_second": 46.273,
"eval_steps_per_second": 11.603,
"step": 210
},
{
"epoch": 1.24,
"grad_norm": 0.10222364962100983,
"learning_rate": 0.00015785330617127842,
"loss": 0.4032,
"step": 211
},
{
"epoch": 1.24,
"grad_norm": 0.09916388243436813,
"learning_rate": 0.00015746557639846097,
"loss": 0.4113,
"step": 212
},
{
"epoch": 1.25,
"grad_norm": 0.1033184677362442,
"learning_rate": 0.0001570765524574602,
"loss": 0.3956,
"step": 213
},
{
"epoch": 1.26,
"grad_norm": 0.11059914529323578,
"learning_rate": 0.00015668624310938913,
"loss": 0.404,
"step": 214
},
{
"epoch": 1.26,
"grad_norm": 0.0982404425740242,
"learning_rate": 0.00015629465714430904,
"loss": 0.3875,
"step": 215
},
{
"epoch": 1.27,
"grad_norm": 0.1017359048128128,
"learning_rate": 0.0001559018033810316,
"loss": 0.4033,
"step": 216
},
{
"epoch": 1.27,
"grad_norm": 0.1090175062417984,
"learning_rate": 0.00015550769066692034,
"loss": 0.4084,
"step": 217
},
{
"epoch": 1.28,
"grad_norm": 0.1041378527879715,
"learning_rate": 0.00015511232787769123,
"loss": 0.4095,
"step": 218
},
{
"epoch": 1.28,
"grad_norm": 0.0929458811879158,
"learning_rate": 0.00015471572391721284,
"loss": 0.3892,
"step": 219
},
{
"epoch": 1.29,
"grad_norm": 0.10203150659799576,
"learning_rate": 0.00015431788771730597,
"loss": 0.3967,
"step": 220
},
{
"epoch": 1.3,
"grad_norm": 0.10682693123817444,
"learning_rate": 0.00015391882823754228,
"loss": 0.4164,
"step": 221
},
{
"epoch": 1.3,
"grad_norm": 0.10250475257635117,
"learning_rate": 0.00015351855446504268,
"loss": 0.402,
"step": 222
},
{
"epoch": 1.31,
"grad_norm": 0.09453174471855164,
"learning_rate": 0.00015311707541427487,
"loss": 0.3927,
"step": 223
},
{
"epoch": 1.31,
"grad_norm": 0.09379514306783676,
"learning_rate": 0.00015271440012685025,
"loss": 0.3825,
"step": 224
},
{
"epoch": 1.32,
"grad_norm": 0.09439584612846375,
"learning_rate": 0.00015231053767132045,
"loss": 0.3798,
"step": 225
},
{
"epoch": 1.33,
"grad_norm": 0.10218024253845215,
"learning_rate": 0.00015190549714297303,
"loss": 0.3909,
"step": 226
},
{
"epoch": 1.33,
"grad_norm": 0.10314662754535675,
"learning_rate": 0.00015149928766362657,
"loss": 0.4075,
"step": 227
},
{
"epoch": 1.34,
"grad_norm": 0.10139516741037369,
"learning_rate": 0.00015109191838142536,
"loss": 0.4125,
"step": 228
},
{
"epoch": 1.34,
"grad_norm": 0.10410083085298538,
"learning_rate": 0.0001506833984706333,
"loss": 0.3998,
"step": 229
},
{
"epoch": 1.35,
"grad_norm": 0.11478332430124283,
"learning_rate": 0.00015027373713142735,
"loss": 0.4181,
"step": 230
},
{
"epoch": 1.36,
"grad_norm": 0.10138574987649918,
"learning_rate": 0.00014986294358969028,
"loss": 0.4152,
"step": 231
},
{
"epoch": 1.36,
"grad_norm": 0.0989016592502594,
"learning_rate": 0.0001494510270968029,
"loss": 0.399,
"step": 232
},
{
"epoch": 1.37,
"grad_norm": 0.10673234611749649,
"learning_rate": 0.00014903799692943574,
"loss": 0.4246,
"step": 233
},
{
"epoch": 1.37,
"grad_norm": 0.10326199978590012,
"learning_rate": 0.00014862386238934016,
"loss": 0.4033,
"step": 234
},
{
"epoch": 1.38,
"grad_norm": 0.09947673231363297,
"learning_rate": 0.00014820863280313873,
"loss": 0.3886,
"step": 235
},
{
"epoch": 1.39,
"grad_norm": 0.09778755158185959,
"learning_rate": 0.00014779231752211548,
"loss": 0.3934,
"step": 236
},
{
"epoch": 1.39,
"grad_norm": 0.09947831183671951,
"learning_rate": 0.0001473749259220048,
"loss": 0.4147,
"step": 237
},
{
"epoch": 1.4,
"grad_norm": 0.10698696970939636,
"learning_rate": 0.00014695646740278085,
"loss": 0.3773,
"step": 238
},
{
"epoch": 1.4,
"grad_norm": 0.09899724274873734,
"learning_rate": 0.00014653695138844557,
"loss": 0.4005,
"step": 239
},
{
"epoch": 1.41,
"grad_norm": 0.10226688534021378,
"learning_rate": 0.0001461163873268164,
"loss": 0.4009,
"step": 240
},
{
"epoch": 1.42,
"grad_norm": 0.10390684753656387,
"learning_rate": 0.0001456947846893137,
"loss": 0.4129,
"step": 241
},
{
"epoch": 1.42,
"grad_norm": 0.09909753501415253,
"learning_rate": 0.0001452721529707473,
"loss": 0.3773,
"step": 242
},
{
"epoch": 1.43,
"grad_norm": 0.09814441949129105,
"learning_rate": 0.00014484850168910263,
"loss": 0.3976,
"step": 243
},
{
"epoch": 1.43,
"grad_norm": 0.10027995705604553,
"learning_rate": 0.00014442384038532665,
"loss": 0.3951,
"step": 244
},
{
"epoch": 1.44,
"grad_norm": 0.0988469123840332,
"learning_rate": 0.00014399817862311256,
"loss": 0.3734,
"step": 245
},
{
"epoch": 1.45,
"grad_norm": 0.09742960333824158,
"learning_rate": 0.00014357152598868476,
"loss": 0.3826,
"step": 246
},
{
"epoch": 1.45,
"grad_norm": 0.1014707013964653,
"learning_rate": 0.00014314389209058286,
"loss": 0.4082,
"step": 247
},
{
"epoch": 1.46,
"grad_norm": 0.10080685466527939,
"learning_rate": 0.00014271528655944522,
"loss": 0.4104,
"step": 248
},
{
"epoch": 1.46,
"grad_norm": 0.1009359136223793,
"learning_rate": 0.0001422857190477921,
"loss": 0.3912,
"step": 249
},
{
"epoch": 1.47,
"grad_norm": 0.0964767187833786,
"learning_rate": 0.0001418551992298083,
"loss": 0.3731,
"step": 250
},
{
"epoch": 1.47,
"grad_norm": 0.09848513454198837,
"learning_rate": 0.0001414237368011253,
"loss": 0.3891,
"step": 251
},
{
"epoch": 1.48,
"grad_norm": 0.10232508182525635,
"learning_rate": 0.00014099134147860286,
"loss": 0.4002,
"step": 252
},
{
"epoch": 1.48,
"eval_loss": 0.4975546896457672,
"eval_runtime": 21.466,
"eval_samples_per_second": 46.259,
"eval_steps_per_second": 11.6,
"step": 252
},
{
"epoch": 1.49,
"grad_norm": 0.10712958872318268,
"learning_rate": 0.00014055802300011027,
"loss": 0.41,
"step": 253
},
{
"epoch": 1.49,
"grad_norm": 0.10100317001342773,
"learning_rate": 0.0001401237911243069,
"loss": 0.3954,
"step": 254
},
{
"epoch": 1.5,
"grad_norm": 0.09532604366540909,
"learning_rate": 0.00013968865563042255,
"loss": 0.4111,
"step": 255
},
{
"epoch": 1.5,
"grad_norm": 0.10244431346654892,
"learning_rate": 0.00013925262631803723,
"loss": 0.4,
"step": 256
},
{
"epoch": 1.51,
"grad_norm": 0.10212967544794083,
"learning_rate": 0.00013881571300686037,
"loss": 0.3996,
"step": 257
},
{
"epoch": 1.52,
"grad_norm": 0.09606331586837769,
"learning_rate": 0.0001383779255365097,
"loss": 0.3852,
"step": 258
},
{
"epoch": 1.52,
"grad_norm": 0.10002104192972183,
"learning_rate": 0.00013793927376628976,
"loss": 0.4126,
"step": 259
},
{
"epoch": 1.53,
"grad_norm": 0.10387251526117325,
"learning_rate": 0.00013749976757496967,
"loss": 0.4146,
"step": 260
},
{
"epoch": 1.53,
"grad_norm": 0.09998749941587448,
"learning_rate": 0.00013705941686056086,
"loss": 0.4143,
"step": 261
},
{
"epoch": 1.54,
"grad_norm": 0.09770024567842484,
"learning_rate": 0.00013661823154009395,
"loss": 0.3719,
"step": 262
},
{
"epoch": 1.55,
"grad_norm": 0.10180012881755829,
"learning_rate": 0.00013617622154939564,
"loss": 0.408,
"step": 263
},
{
"epoch": 1.55,
"grad_norm": 0.10063595324754715,
"learning_rate": 0.00013573339684286472,
"loss": 0.4007,
"step": 264
},
{
"epoch": 1.56,
"grad_norm": 0.10153420269489288,
"learning_rate": 0.00013528976739324807,
"loss": 0.4054,
"step": 265
},
{
"epoch": 1.56,
"grad_norm": 0.10569385439157486,
"learning_rate": 0.0001348453431914159,
"loss": 0.4031,
"step": 266
},
{
"epoch": 1.57,
"grad_norm": 0.10000584274530411,
"learning_rate": 0.00013440013424613698,
"loss": 0.4018,
"step": 267
},
{
"epoch": 1.58,
"grad_norm": 0.09948629140853882,
"learning_rate": 0.00013395415058385296,
"loss": 0.3997,
"step": 268
},
{
"epoch": 1.58,
"grad_norm": 0.10179516673088074,
"learning_rate": 0.00013350740224845278,
"loss": 0.3872,
"step": 269
},
{
"epoch": 1.59,
"grad_norm": 0.0989665687084198,
"learning_rate": 0.00013305989930104638,
"loss": 0.3672,
"step": 270
},
{
"epoch": 1.59,
"grad_norm": 0.09988453984260559,
"learning_rate": 0.00013261165181973814,
"loss": 0.3978,
"step": 271
},
{
"epoch": 1.6,
"grad_norm": 0.09958979487419128,
"learning_rate": 0.00013216266989939988,
"loss": 0.3793,
"step": 272
},
{
"epoch": 1.61,
"grad_norm": 0.10080169141292572,
"learning_rate": 0.0001317129636514435,
"loss": 0.4012,
"step": 273
},
{
"epoch": 1.61,
"grad_norm": 0.10148239880800247,
"learning_rate": 0.00013126254320359343,
"loss": 0.3904,
"step": 274
},
{
"epoch": 1.62,
"grad_norm": 0.10728135704994202,
"learning_rate": 0.00013081141869965835,
"loss": 0.393,
"step": 275
},
{
"epoch": 1.62,
"grad_norm": 0.09917322546243668,
"learning_rate": 0.00013035960029930278,
"loss": 0.3725,
"step": 276
},
{
"epoch": 1.63,
"grad_norm": 0.10163102298974991,
"learning_rate": 0.00012990709817781837,
"loss": 0.406,
"step": 277
},
{
"epoch": 1.64,
"grad_norm": 0.09815432131290436,
"learning_rate": 0.00012945392252589465,
"loss": 0.3995,
"step": 278
},
{
"epoch": 1.64,
"grad_norm": 0.09806889295578003,
"learning_rate": 0.0001290000835493896,
"loss": 0.3952,
"step": 279
},
{
"epoch": 1.65,
"grad_norm": 0.09826052933931351,
"learning_rate": 0.0001285455914690997,
"loss": 0.402,
"step": 280
},
{
"epoch": 1.65,
"grad_norm": 0.09949176013469696,
"learning_rate": 0.0001280904565205299,
"loss": 0.3953,
"step": 281
},
{
"epoch": 1.66,
"grad_norm": 0.10278761386871338,
"learning_rate": 0.00012763468895366303,
"loss": 0.4091,
"step": 282
},
{
"epoch": 1.66,
"grad_norm": 0.10104485601186752,
"learning_rate": 0.0001271782990327289,
"loss": 0.3807,
"step": 283
},
{
"epoch": 1.67,
"grad_norm": 0.10103065520524979,
"learning_rate": 0.0001267212970359732,
"loss": 0.4136,
"step": 284
},
{
"epoch": 1.68,
"grad_norm": 0.10056735575199127,
"learning_rate": 0.0001262636932554261,
"loss": 0.3834,
"step": 285
},
{
"epoch": 1.68,
"grad_norm": 0.10619843006134033,
"learning_rate": 0.00012580549799667034,
"loss": 0.3926,
"step": 286
},
{
"epoch": 1.69,
"grad_norm": 0.10867036134004593,
"learning_rate": 0.00012534672157860928,
"loss": 0.4102,
"step": 287
},
{
"epoch": 1.69,
"grad_norm": 0.10176358371973038,
"learning_rate": 0.00012488737433323426,
"loss": 0.4013,
"step": 288
},
{
"epoch": 1.7,
"grad_norm": 0.09872467070817947,
"learning_rate": 0.00012442746660539227,
"loss": 0.3859,
"step": 289
},
{
"epoch": 1.71,
"grad_norm": 0.10109713673591614,
"learning_rate": 0.00012396700875255264,
"loss": 0.3779,
"step": 290
},
{
"epoch": 1.71,
"grad_norm": 0.10859864950180054,
"learning_rate": 0.00012350601114457396,
"loss": 0.4023,
"step": 291
},
{
"epoch": 1.72,
"grad_norm": 0.10092321038246155,
"learning_rate": 0.00012304448416347065,
"loss": 0.3998,
"step": 292
},
{
"epoch": 1.72,
"grad_norm": 0.10352278500795364,
"learning_rate": 0.0001225824382031789,
"loss": 0.4067,
"step": 293
},
{
"epoch": 1.73,
"grad_norm": 0.1029689610004425,
"learning_rate": 0.0001221198836693226,
"loss": 0.3656,
"step": 294
},
{
"epoch": 1.73,
"eval_loss": 0.4955105185508728,
"eval_runtime": 21.4362,
"eval_samples_per_second": 46.324,
"eval_steps_per_second": 11.616,
"step": 294
},
{
"epoch": 1.74,
"grad_norm": 0.10078923404216766,
"learning_rate": 0.00012165683097897931,
"loss": 0.4035,
"step": 295
},
{
"epoch": 1.74,
"grad_norm": 0.1020880788564682,
"learning_rate": 0.00012119329056044532,
"loss": 0.393,
"step": 296
},
{
"epoch": 1.75,
"grad_norm": 0.10287690162658691,
"learning_rate": 0.00012072927285300098,
"loss": 0.4005,
"step": 297
},
{
"epoch": 1.75,
"grad_norm": 0.10361482948064804,
"learning_rate": 0.00012026478830667551,
"loss": 0.3885,
"step": 298
},
{
"epoch": 1.76,
"grad_norm": 0.10416186600923538,
"learning_rate": 0.00011979984738201171,
"loss": 0.3997,
"step": 299
},
{
"epoch": 1.77,
"grad_norm": 0.10558196157217026,
"learning_rate": 0.00011933446054983035,
"loss": 0.4082,
"step": 300
},
{
"epoch": 1.77,
"grad_norm": 0.10211660712957382,
"learning_rate": 0.00011886863829099441,
"loss": 0.3798,
"step": 301
},
{
"epoch": 1.78,
"grad_norm": 0.10305824875831604,
"learning_rate": 0.00011840239109617302,
"loss": 0.3898,
"step": 302
},
{
"epoch": 1.78,
"grad_norm": 0.11154858022928238,
"learning_rate": 0.0001179357294656051,
"loss": 0.3921,
"step": 303
},
{
"epoch": 1.79,
"grad_norm": 0.1013292595744133,
"learning_rate": 0.00011746866390886305,
"loss": 0.3808,
"step": 304
},
{
"epoch": 1.8,
"grad_norm": 0.10282581299543381,
"learning_rate": 0.00011700120494461595,
"loss": 0.3811,
"step": 305
},
{
"epoch": 1.8,
"grad_norm": 0.10098174214363098,
"learning_rate": 0.0001165333631003928,
"loss": 0.3822,
"step": 306
},
{
"epoch": 1.81,
"grad_norm": 0.10231437534093857,
"learning_rate": 0.00011606514891234526,
"loss": 0.38,
"step": 307
},
{
"epoch": 1.81,
"grad_norm": 0.10368547588586807,
"learning_rate": 0.00011559657292501042,
"loss": 0.3863,
"step": 308
},
{
"epoch": 1.82,
"grad_norm": 0.10524257272481918,
"learning_rate": 0.00011512764569107351,
"loss": 0.4024,
"step": 309
},
{
"epoch": 1.82,
"grad_norm": 0.10808374732732773,
"learning_rate": 0.00011465837777113,
"loss": 0.4021,
"step": 310
},
{
"epoch": 1.83,
"grad_norm": 0.10130346566438675,
"learning_rate": 0.00011418877973344781,
"loss": 0.3857,
"step": 311
},
{
"epoch": 1.84,
"grad_norm": 0.10113856196403503,
"learning_rate": 0.00011371886215372951,
"loss": 0.4028,
"step": 312
},
{
"epoch": 1.84,
"grad_norm": 0.10313602536916733,
"learning_rate": 0.00011324863561487383,
"loss": 0.3897,
"step": 313
},
{
"epoch": 1.85,
"grad_norm": 0.0981384888291359,
"learning_rate": 0.00011277811070673765,
"loss": 0.3798,
"step": 314
},
{
"epoch": 1.85,
"grad_norm": 0.10432131588459015,
"learning_rate": 0.00011230729802589726,
"loss": 0.3941,
"step": 315
},
{
"epoch": 1.86,
"grad_norm": 0.09853876382112503,
"learning_rate": 0.00011183620817540986,
"loss": 0.3799,
"step": 316
},
{
"epoch": 1.87,
"grad_norm": 0.10610882192850113,
"learning_rate": 0.00011136485176457459,
"loss": 0.4034,
"step": 317
},
{
"epoch": 1.87,
"grad_norm": 0.09782709181308746,
"learning_rate": 0.00011089323940869392,
"loss": 0.3725,
"step": 318
},
{
"epoch": 1.88,
"grad_norm": 0.10068295896053314,
"learning_rate": 0.0001104213817288343,
"loss": 0.3721,
"step": 319
},
{
"epoch": 1.88,
"grad_norm": 0.10485327988862991,
"learning_rate": 0.00010994928935158702,
"loss": 0.3832,
"step": 320
},
{
"epoch": 1.89,
"grad_norm": 0.10914643853902817,
"learning_rate": 0.00010947697290882903,
"loss": 0.4039,
"step": 321
},
{
"epoch": 1.9,
"grad_norm": 0.10210110992193222,
"learning_rate": 0.00010900444303748332,
"loss": 0.3954,
"step": 322
},
{
"epoch": 1.9,
"grad_norm": 0.09887547791004181,
"learning_rate": 0.00010853171037927951,
"loss": 0.3711,
"step": 323
},
{
"epoch": 1.91,
"grad_norm": 0.1009160652756691,
"learning_rate": 0.0001080587855805141,
"loss": 0.3917,
"step": 324
},
{
"epoch": 1.91,
"grad_norm": 0.10089701414108276,
"learning_rate": 0.00010758567929181074,
"loss": 0.388,
"step": 325
},
{
"epoch": 1.92,
"grad_norm": 0.10800614953041077,
"learning_rate": 0.00010711240216788036,
"loss": 0.4067,
"step": 326
},
{
"epoch": 1.93,
"grad_norm": 0.10289803147315979,
"learning_rate": 0.00010663896486728133,
"loss": 0.3965,
"step": 327
},
{
"epoch": 1.93,
"grad_norm": 0.10536928474903107,
"learning_rate": 0.00010616537805217916,
"loss": 0.4033,
"step": 328
},
{
"epoch": 1.94,
"grad_norm": 0.10302754491567612,
"learning_rate": 0.00010569165238810666,
"loss": 0.3841,
"step": 329
},
{
"epoch": 1.94,
"grad_norm": 0.10210216045379639,
"learning_rate": 0.00010521779854372353,
"loss": 0.4043,
"step": 330
},
{
"epoch": 1.95,
"grad_norm": 0.10437451303005219,
"learning_rate": 0.00010474382719057631,
"loss": 0.4065,
"step": 331
},
{
"epoch": 1.96,
"grad_norm": 0.10098063200712204,
"learning_rate": 0.00010426974900285784,
"loss": 0.4029,
"step": 332
},
{
"epoch": 1.96,
"grad_norm": 0.09919014573097229,
"learning_rate": 0.00010379557465716696,
"loss": 0.3902,
"step": 333
},
{
"epoch": 1.97,
"grad_norm": 0.09987551718950272,
"learning_rate": 0.00010332131483226804,
"loss": 0.3901,
"step": 334
},
{
"epoch": 1.97,
"grad_norm": 0.10001327842473984,
"learning_rate": 0.00010284698020885053,
"loss": 0.3697,
"step": 335
},
{
"epoch": 1.98,
"grad_norm": 0.11250711232423782,
"learning_rate": 0.00010237258146928848,
"loss": 0.3744,
"step": 336
},
{
"epoch": 1.98,
"eval_loss": 0.4941823482513428,
"eval_runtime": 21.4596,
"eval_samples_per_second": 46.273,
"eval_steps_per_second": 11.603,
"step": 336
},
{
"epoch": 1.99,
"grad_norm": 0.11110812425613403,
"learning_rate": 0.00010189812929739976,
"loss": 0.3929,
"step": 337
},
{
"epoch": 1.99,
"grad_norm": 0.10165286064147949,
"learning_rate": 0.00010142363437820565,
"loss": 0.387,
"step": 338
},
{
"epoch": 2.0,
"grad_norm": 0.10643592476844788,
"learning_rate": 0.00010094910739769008,
"loss": 0.3994,
"step": 339
},
{
"epoch": 2.0,
"grad_norm": 0.10812180489301682,
"learning_rate": 0.00010047455904255909,
"loss": 0.3933,
"step": 340
},
{
"epoch": 2.01,
"grad_norm": 0.10249359905719757,
"learning_rate": 0.0001,
"loss": 0.3867,
"step": 341
},
{
"epoch": 2.01,
"grad_norm": 0.10428471863269806,
"learning_rate": 9.952544095744092e-05,
"loss": 0.3792,
"step": 342
},
{
"epoch": 2.0,
"grad_norm": 0.11811227351427078,
"learning_rate": 9.905089260230995e-05,
"loss": 0.3711,
"step": 343
},
{
"epoch": 2.01,
"grad_norm": 0.11514988541603088,
"learning_rate": 9.857636562179437e-05,
"loss": 0.3566,
"step": 344
},
{
"epoch": 2.02,
"grad_norm": 0.11116521805524826,
"learning_rate": 9.810187070260027e-05,
"loss": 0.3551,
"step": 345
},
{
"epoch": 2.02,
"grad_norm": 0.1339193433523178,
"learning_rate": 9.762741853071153e-05,
"loss": 0.3615,
"step": 346
},
{
"epoch": 2.03,
"grad_norm": 0.13373638689517975,
"learning_rate": 9.715301979114946e-05,
"loss": 0.3392,
"step": 347
},
{
"epoch": 2.03,
"grad_norm": 0.12637023627758026,
"learning_rate": 9.667868516773201e-05,
"loss": 0.3116,
"step": 348
},
{
"epoch": 2.04,
"grad_norm": 0.11064064502716064,
"learning_rate": 9.620442534283307e-05,
"loss": 0.3395,
"step": 349
},
{
"epoch": 2.05,
"grad_norm": 0.10704579204320908,
"learning_rate": 9.573025099714217e-05,
"loss": 0.3504,
"step": 350
},
{
"epoch": 2.05,
"grad_norm": 0.11721500009298325,
"learning_rate": 9.525617280942371e-05,
"loss": 0.3377,
"step": 351
},
{
"epoch": 2.06,
"grad_norm": 0.11729004979133606,
"learning_rate": 9.478220145627645e-05,
"loss": 0.346,
"step": 352
},
{
"epoch": 2.06,
"grad_norm": 0.10619601607322693,
"learning_rate": 9.430834761189338e-05,
"loss": 0.349,
"step": 353
},
{
"epoch": 2.07,
"grad_norm": 0.1071799099445343,
"learning_rate": 9.383462194782085e-05,
"loss": 0.3508,
"step": 354
},
{
"epoch": 2.08,
"grad_norm": 0.10692652314901352,
"learning_rate": 9.336103513271869e-05,
"loss": 0.3327,
"step": 355
},
{
"epoch": 2.08,
"grad_norm": 0.11744857579469681,
"learning_rate": 9.288759783211967e-05,
"loss": 0.3527,
"step": 356
},
{
"epoch": 2.09,
"grad_norm": 0.1182284876704216,
"learning_rate": 9.24143207081893e-05,
"loss": 0.3388,
"step": 357
},
{
"epoch": 2.09,
"grad_norm": 0.12824517488479614,
"learning_rate": 9.194121441948596e-05,
"loss": 0.3601,
"step": 358
},
{
"epoch": 2.1,
"grad_norm": 0.11172507703304291,
"learning_rate": 9.146828962072051e-05,
"loss": 0.3523,
"step": 359
},
{
"epoch": 2.11,
"grad_norm": 0.1088048666715622,
"learning_rate": 9.099555696251667e-05,
"loss": 0.3349,
"step": 360
},
{
"epoch": 2.11,
"grad_norm": 0.11656352877616882,
"learning_rate": 9.0523027091171e-05,
"loss": 0.3446,
"step": 361
},
{
"epoch": 2.12,
"grad_norm": 0.1138211190700531,
"learning_rate": 9.0050710648413e-05,
"loss": 0.3394,
"step": 362
},
{
"epoch": 2.12,
"grad_norm": 0.11616445332765579,
"learning_rate": 8.957861827116577e-05,
"loss": 0.3489,
"step": 363
},
{
"epoch": 2.13,
"grad_norm": 0.11222302168607712,
"learning_rate": 8.910676059130611e-05,
"loss": 0.353,
"step": 364
},
{
"epoch": 2.14,
"grad_norm": 0.11436023563146591,
"learning_rate": 8.863514823542542e-05,
"loss": 0.3269,
"step": 365
},
{
"epoch": 2.14,
"grad_norm": 0.12150812894105911,
"learning_rate": 8.81637918245902e-05,
"loss": 0.3419,
"step": 366
},
{
"epoch": 2.15,
"grad_norm": 0.11648691445589066,
"learning_rate": 8.769270197410276e-05,
"loss": 0.3374,
"step": 367
},
{
"epoch": 2.15,
"grad_norm": 0.11707336455583572,
"learning_rate": 8.722188929326236e-05,
"loss": 0.3489,
"step": 368
},
{
"epoch": 2.16,
"grad_norm": 0.11122533679008484,
"learning_rate": 8.67513643851262e-05,
"loss": 0.3476,
"step": 369
},
{
"epoch": 2.16,
"grad_norm": 0.11035927385091782,
"learning_rate": 8.628113784627053e-05,
"loss": 0.3416,
"step": 370
},
{
"epoch": 2.17,
"grad_norm": 0.11648087203502655,
"learning_rate": 8.581122026655221e-05,
"loss": 0.337,
"step": 371
},
{
"epoch": 2.18,
"grad_norm": 0.11055613309144974,
"learning_rate": 8.534162222887003e-05,
"loss": 0.3479,
"step": 372
},
{
"epoch": 2.18,
"grad_norm": 0.11321202665567398,
"learning_rate": 8.487235430892649e-05,
"loss": 0.3567,
"step": 373
},
{
"epoch": 2.19,
"grad_norm": 0.11344156414270401,
"learning_rate": 8.44034270749896e-05,
"loss": 0.3502,
"step": 374
},
{
"epoch": 2.19,
"grad_norm": 0.11430197954177856,
"learning_rate": 8.393485108765478e-05,
"loss": 0.3406,
"step": 375
},
{
"epoch": 2.2,
"grad_norm": 0.11915215104818344,
"learning_rate": 8.346663689960725e-05,
"loss": 0.3387,
"step": 376
},
{
"epoch": 2.21,
"grad_norm": 0.11410155147314072,
"learning_rate": 8.299879505538406e-05,
"loss": 0.3441,
"step": 377
},
{
"epoch": 2.21,
"grad_norm": 0.1166268065571785,
"learning_rate": 8.253133609113699e-05,
"loss": 0.3278,
"step": 378
},
{
"epoch": 2.21,
"eval_loss": 0.5011767745018005,
"eval_runtime": 21.4646,
"eval_samples_per_second": 46.262,
"eval_steps_per_second": 11.601,
"step": 378
},
{
"epoch": 2.22,
"grad_norm": 0.1214762032032013,
"learning_rate": 8.206427053439495e-05,
"loss": 0.351,
"step": 379
},
{
"epoch": 2.22,
"grad_norm": 0.1124025508761406,
"learning_rate": 8.159760890382702e-05,
"loss": 0.35,
"step": 380
},
{
"epoch": 2.23,
"grad_norm": 0.1132451742887497,
"learning_rate": 8.113136170900557e-05,
"loss": 0.336,
"step": 381
},
{
"epoch": 2.24,
"grad_norm": 0.11561685800552368,
"learning_rate": 8.066553945016968e-05,
"loss": 0.3296,
"step": 382
},
{
"epoch": 2.24,
"grad_norm": 0.11814241856336594,
"learning_rate": 8.02001526179883e-05,
"loss": 0.339,
"step": 383
},
{
"epoch": 2.25,
"grad_norm": 0.11445321887731552,
"learning_rate": 7.973521169332451e-05,
"loss": 0.3437,
"step": 384
},
{
"epoch": 2.25,
"grad_norm": 0.11405869573354721,
"learning_rate": 7.927072714699903e-05,
"loss": 0.3591,
"step": 385
},
{
"epoch": 2.26,
"grad_norm": 0.12095706909894943,
"learning_rate": 7.880670943955467e-05,
"loss": 0.3475,
"step": 386
},
{
"epoch": 2.27,
"grad_norm": 0.12278500199317932,
"learning_rate": 7.834316902102071e-05,
"loss": 0.3299,
"step": 387
},
{
"epoch": 2.27,
"grad_norm": 0.12034577131271362,
"learning_rate": 7.78801163306774e-05,
"loss": 0.3523,
"step": 388
},
{
"epoch": 2.28,
"grad_norm": 0.11056854575872421,
"learning_rate": 7.741756179682116e-05,
"loss": 0.3224,
"step": 389
},
{
"epoch": 2.28,
"grad_norm": 0.11459018290042877,
"learning_rate": 7.695551583652936e-05,
"loss": 0.3458,
"step": 390
},
{
"epoch": 2.29,
"grad_norm": 0.11864470690488815,
"learning_rate": 7.649398885542604e-05,
"loss": 0.3408,
"step": 391
},
{
"epoch": 2.3,
"grad_norm": 0.11902540177106857,
"learning_rate": 7.603299124744743e-05,
"loss": 0.337,
"step": 392
},
{
"epoch": 2.3,
"grad_norm": 0.11285750567913055,
"learning_rate": 7.557253339460777e-05,
"loss": 0.3406,
"step": 393
},
{
"epoch": 2.31,
"grad_norm": 0.11690229177474976,
"learning_rate": 7.511262566676573e-05,
"loss": 0.3407,
"step": 394
},
{
"epoch": 2.31,
"grad_norm": 0.11694052070379257,
"learning_rate": 7.465327842139074e-05,
"loss": 0.34,
"step": 395
},
{
"epoch": 2.32,
"grad_norm": 0.1170695573091507,
"learning_rate": 7.419450200332964e-05,
"loss": 0.3449,
"step": 396
},
{
"epoch": 2.32,
"grad_norm": 0.11467460542917252,
"learning_rate": 7.373630674457393e-05,
"loss": 0.3429,
"step": 397
},
{
"epoch": 2.33,
"grad_norm": 0.1154307872056961,
"learning_rate": 7.327870296402682e-05,
"loss": 0.3518,
"step": 398
},
{
"epoch": 2.34,
"grad_norm": 0.11852456629276276,
"learning_rate": 7.28217009672711e-05,
"loss": 0.3444,
"step": 399
},
{
"epoch": 2.34,
"grad_norm": 0.11891540884971619,
"learning_rate": 7.236531104633698e-05,
"loss": 0.3394,
"step": 400
},
{
"epoch": 2.35,
"grad_norm": 0.12176164239645004,
"learning_rate": 7.190954347947009e-05,
"loss": 0.322,
"step": 401
},
{
"epoch": 2.35,
"grad_norm": 0.11945579946041107,
"learning_rate": 7.145440853090034e-05,
"loss": 0.344,
"step": 402
},
{
"epoch": 2.36,
"grad_norm": 0.11632686853408813,
"learning_rate": 7.099991645061044e-05,
"loss": 0.3502,
"step": 403
},
{
"epoch": 2.37,
"grad_norm": 0.12018176913261414,
"learning_rate": 7.054607747410535e-05,
"loss": 0.3523,
"step": 404
},
{
"epoch": 2.37,
"grad_norm": 0.11948262155056,
"learning_rate": 7.009290182218166e-05,
"loss": 0.34,
"step": 405
},
{
"epoch": 2.38,
"grad_norm": 0.11255805939435959,
"learning_rate": 6.964039970069723e-05,
"loss": 0.343,
"step": 406
},
{
"epoch": 2.38,
"grad_norm": 0.1162085086107254,
"learning_rate": 6.918858130034167e-05,
"loss": 0.3451,
"step": 407
},
{
"epoch": 2.39,
"grad_norm": 0.11837758123874664,
"learning_rate": 6.87374567964066e-05,
"loss": 0.3424,
"step": 408
},
{
"epoch": 2.4,
"grad_norm": 0.11716870218515396,
"learning_rate": 6.828703634855651e-05,
"loss": 0.3449,
"step": 409
},
{
"epoch": 2.4,
"grad_norm": 0.11923764646053314,
"learning_rate": 6.783733010060018e-05,
"loss": 0.3503,
"step": 410
},
{
"epoch": 2.41,
"grad_norm": 0.11688950657844543,
"learning_rate": 6.738834818026187e-05,
"loss": 0.3515,
"step": 411
},
{
"epoch": 2.41,
"grad_norm": 0.11869969964027405,
"learning_rate": 6.694010069895362e-05,
"loss": 0.3357,
"step": 412
},
{
"epoch": 2.42,
"grad_norm": 0.11307539790868759,
"learning_rate": 6.649259775154725e-05,
"loss": 0.331,
"step": 413
},
{
"epoch": 2.43,
"grad_norm": 0.11562840640544891,
"learning_rate": 6.604584941614705e-05,
"loss": 0.352,
"step": 414
},
{
"epoch": 2.43,
"grad_norm": 0.11388909071683884,
"learning_rate": 6.559986575386307e-05,
"loss": 0.3439,
"step": 415
},
{
"epoch": 2.44,
"grad_norm": 0.11977346241474152,
"learning_rate": 6.515465680858412e-05,
"loss": 0.3686,
"step": 416
},
{
"epoch": 2.44,
"grad_norm": 0.11565675586462021,
"learning_rate": 6.471023260675196e-05,
"loss": 0.339,
"step": 417
},
{
"epoch": 2.45,
"grad_norm": 0.1227443665266037,
"learning_rate": 6.426660315713529e-05,
"loss": 0.3424,
"step": 418
},
{
"epoch": 2.46,
"grad_norm": 0.11462421715259552,
"learning_rate": 6.382377845060438e-05,
"loss": 0.3221,
"step": 419
},
{
"epoch": 2.46,
"grad_norm": 0.1177360862493515,
"learning_rate": 6.338176845990608e-05,
"loss": 0.344,
"step": 420
},
{
"epoch": 2.46,
"eval_loss": 0.5003070831298828,
"eval_runtime": 21.4617,
"eval_samples_per_second": 46.268,
"eval_steps_per_second": 11.602,
"step": 420
},
{
"epoch": 2.47,
"grad_norm": 0.11535537987947464,
"learning_rate": 6.294058313943915e-05,
"loss": 0.3384,
"step": 421
},
{
"epoch": 2.47,
"grad_norm": 0.12123331427574158,
"learning_rate": 6.250023242503031e-05,
"loss": 0.3402,
"step": 422
},
{
"epoch": 2.48,
"grad_norm": 0.11845969408750534,
"learning_rate": 6.206072623371027e-05,
"loss": 0.3327,
"step": 423
},
{
"epoch": 2.49,
"grad_norm": 0.12011318653821945,
"learning_rate": 6.16220744634903e-05,
"loss": 0.3243,
"step": 424
},
{
"epoch": 2.49,
"grad_norm": 0.12184374779462814,
"learning_rate": 6.118428699313965e-05,
"loss": 0.3431,
"step": 425
},
{
"epoch": 2.5,
"grad_norm": 0.11864135414361954,
"learning_rate": 6.0747373681962794e-05,
"loss": 0.3268,
"step": 426
},
{
"epoch": 2.5,
"grad_norm": 0.12076004594564438,
"learning_rate": 6.031134436957746e-05,
"loss": 0.3623,
"step": 427
},
{
"epoch": 2.51,
"grad_norm": 0.11430728435516357,
"learning_rate": 5.9876208875693144e-05,
"loss": 0.3307,
"step": 428
},
{
"epoch": 2.51,
"grad_norm": 0.11330832540988922,
"learning_rate": 5.9441976999889745e-05,
"loss": 0.3373,
"step": 429
},
{
"epoch": 2.52,
"grad_norm": 0.11900202184915543,
"learning_rate": 5.900865852139714e-05,
"loss": 0.3546,
"step": 430
},
{
"epoch": 2.53,
"grad_norm": 0.11765287816524506,
"learning_rate": 5.8576263198874746e-05,
"loss": 0.3286,
"step": 431
},
{
"epoch": 2.53,
"grad_norm": 0.11611023545265198,
"learning_rate": 5.814480077019173e-05,
"loss": 0.3463,
"step": 432
},
{
"epoch": 2.54,
"grad_norm": 0.116789311170578,
"learning_rate": 5.7714280952207954e-05,
"loss": 0.3449,
"step": 433
},
{
"epoch": 2.54,
"grad_norm": 0.11763904243707657,
"learning_rate": 5.728471344055482e-05,
"loss": 0.3437,
"step": 434
},
{
"epoch": 2.55,
"grad_norm": 0.11893092840909958,
"learning_rate": 5.685610790941713e-05,
"loss": 0.3513,
"step": 435
},
{
"epoch": 2.56,
"grad_norm": 0.1151190996170044,
"learning_rate": 5.6428474011315255e-05,
"loss": 0.3209,
"step": 436
},
{
"epoch": 2.56,
"grad_norm": 0.12135551869869232,
"learning_rate": 5.600182137688745e-05,
"loss": 0.3472,
"step": 437
},
{
"epoch": 2.57,
"grad_norm": 0.11336695402860641,
"learning_rate": 5.5576159614673385e-05,
"loss": 0.3272,
"step": 438
},
{
"epoch": 2.57,
"grad_norm": 0.11953918635845184,
"learning_rate": 5.515149831089739e-05,
"loss": 0.3233,
"step": 439
},
{
"epoch": 2.58,
"grad_norm": 0.1199425756931305,
"learning_rate": 5.4727847029252733e-05,
"loss": 0.3431,
"step": 440
},
{
"epoch": 2.59,
"grad_norm": 0.11986943334341049,
"learning_rate": 5.430521531068633e-05,
"loss": 0.3351,
"step": 441
},
{
"epoch": 2.59,
"grad_norm": 0.12421400845050812,
"learning_rate": 5.3883612673183616e-05,
"loss": 0.3676,
"step": 442
},
{
"epoch": 2.6,
"grad_norm": 0.11908672749996185,
"learning_rate": 5.3463048611554445e-05,
"loss": 0.3463,
"step": 443
},
{
"epoch": 2.6,
"grad_norm": 0.11805973201990128,
"learning_rate": 5.304353259721917e-05,
"loss": 0.3323,
"step": 444
},
{
"epoch": 2.61,
"grad_norm": 0.12039713561534882,
"learning_rate": 5.2625074077995215e-05,
"loss": 0.3436,
"step": 445
},
{
"epoch": 2.62,
"grad_norm": 0.11868314445018768,
"learning_rate": 5.220768247788458e-05,
"loss": 0.344,
"step": 446
},
{
"epoch": 2.62,
"grad_norm": 0.12498147040605545,
"learning_rate": 5.179136719686124e-05,
"loss": 0.3486,
"step": 447
},
{
"epoch": 2.63,
"grad_norm": 0.1151999980211258,
"learning_rate": 5.137613761065982e-05,
"loss": 0.3376,
"step": 448
},
{
"epoch": 2.63,
"grad_norm": 0.11347607523202896,
"learning_rate": 5.0962003070564254e-05,
"loss": 0.3451,
"step": 449
},
{
"epoch": 2.64,
"grad_norm": 0.11494697630405426,
"learning_rate": 5.054897290319713e-05,
"loss": 0.3212,
"step": 450
},
{
"epoch": 2.65,
"grad_norm": 0.12121488898992538,
"learning_rate": 5.013705641030978e-05,
"loss": 0.3284,
"step": 451
},
{
"epoch": 2.65,
"grad_norm": 0.11501440405845642,
"learning_rate": 4.9726262868572685e-05,
"loss": 0.336,
"step": 452
},
{
"epoch": 2.66,
"grad_norm": 0.12091628462076187,
"learning_rate": 4.931660152936672e-05,
"loss": 0.3443,
"step": 453
},
{
"epoch": 2.66,
"grad_norm": 0.11806819587945938,
"learning_rate": 4.8908081618574685e-05,
"loss": 0.337,
"step": 454
},
{
"epoch": 2.67,
"grad_norm": 0.12331026047468185,
"learning_rate": 4.8500712336373454e-05,
"loss": 0.3424,
"step": 455
},
{
"epoch": 2.68,
"grad_norm": 0.11950333416461945,
"learning_rate": 4.809450285702697e-05,
"loss": 0.3439,
"step": 456
},
{
"epoch": 2.68,
"grad_norm": 0.12057134509086609,
"learning_rate": 4.7689462328679555e-05,
"loss": 0.3473,
"step": 457
},
{
"epoch": 2.69,
"grad_norm": 0.12127847224473953,
"learning_rate": 4.728559987314974e-05,
"loss": 0.3409,
"step": 458
},
{
"epoch": 2.69,
"grad_norm": 0.12870007753372192,
"learning_rate": 4.688292458572515e-05,
"loss": 0.3732,
"step": 459
},
{
"epoch": 2.7,
"grad_norm": 0.11999403685331345,
"learning_rate": 4.6481445534957314e-05,
"loss": 0.3444,
"step": 460
},
{
"epoch": 2.7,
"grad_norm": 0.11927364021539688,
"learning_rate": 4.608117176245773e-05,
"loss": 0.3225,
"step": 461
},
{
"epoch": 2.71,
"grad_norm": 0.11497607827186584,
"learning_rate": 4.5682112282694075e-05,
"loss": 0.3216,
"step": 462
},
{
"epoch": 2.71,
"eval_loss": 0.4984052777290344,
"eval_runtime": 21.4488,
"eval_samples_per_second": 46.296,
"eval_steps_per_second": 11.609,
"step": 462
},
{
"epoch": 2.72,
"grad_norm": 0.125841423869133,
"learning_rate": 4.528427608278718e-05,
"loss": 0.3703,
"step": 463
},
{
"epoch": 2.72,
"grad_norm": 0.12718433141708374,
"learning_rate": 4.488767212230883e-05,
"loss": 0.3439,
"step": 464
},
{
"epoch": 2.73,
"grad_norm": 0.12245268374681473,
"learning_rate": 4.4492309333079686e-05,
"loss": 0.3512,
"step": 465
},
{
"epoch": 2.73,
"grad_norm": 0.11985602229833603,
"learning_rate": 4.409819661896839e-05,
"loss": 0.3378,
"step": 466
},
{
"epoch": 2.74,
"grad_norm": 0.12222087383270264,
"learning_rate": 4.3705342855691e-05,
"loss": 0.3514,
"step": 467
},
{
"epoch": 2.75,
"grad_norm": 0.11923618614673615,
"learning_rate": 4.331375689061089e-05,
"loss": 0.3366,
"step": 468
},
{
"epoch": 2.75,
"grad_norm": 0.12106750160455704,
"learning_rate": 4.2923447542539787e-05,
"loss": 0.3541,
"step": 469
},
{
"epoch": 2.76,
"grad_norm": 0.12061194330453873,
"learning_rate": 4.2534423601539055e-05,
"loss": 0.3407,
"step": 470
},
{
"epoch": 2.76,
"grad_norm": 0.12152953445911407,
"learning_rate": 4.2146693828721574e-05,
"loss": 0.341,
"step": 471
},
{
"epoch": 2.77,
"grad_norm": 0.12051062285900116,
"learning_rate": 4.176026695605476e-05,
"loss": 0.341,
"step": 472
},
{
"epoch": 2.78,
"grad_norm": 0.11901510506868362,
"learning_rate": 4.1375151686163605e-05,
"loss": 0.3438,
"step": 473
},
{
"epoch": 2.78,
"grad_norm": 0.12126335501670837,
"learning_rate": 4.099135669213483e-05,
"loss": 0.3481,
"step": 474
},
{
"epoch": 2.79,
"grad_norm": 0.12033100426197052,
"learning_rate": 4.060889061732165e-05,
"loss": 0.335,
"step": 475
},
{
"epoch": 2.79,
"grad_norm": 0.1273912489414215,
"learning_rate": 4.022776207514884e-05,
"loss": 0.3575,
"step": 476
},
{
"epoch": 2.8,
"grad_norm": 0.12302430719137192,
"learning_rate": 3.984797964891914e-05,
"loss": 0.3432,
"step": 477
},
{
"epoch": 2.81,
"grad_norm": 0.1207548975944519,
"learning_rate": 3.946955189161954e-05,
"loss": 0.3342,
"step": 478
},
{
"epoch": 2.81,
"grad_norm": 0.1166522428393364,
"learning_rate": 3.9092487325728997e-05,
"loss": 0.3224,
"step": 479
},
{
"epoch": 2.82,
"grad_norm": 0.11961046606302261,
"learning_rate": 3.871679444302635e-05,
"loss": 0.3514,
"step": 480
},
{
"epoch": 2.82,
"grad_norm": 0.11425036936998367,
"learning_rate": 3.834248170439901e-05,
"loss": 0.3343,
"step": 481
},
{
"epoch": 2.83,
"grad_norm": 0.11901889741420746,
"learning_rate": 3.796955753965263e-05,
"loss": 0.3488,
"step": 482
},
{
"epoch": 2.84,
"grad_norm": 0.12250632792711258,
"learning_rate": 3.7598030347321e-05,
"loss": 0.3497,
"step": 483
},
{
"epoch": 2.84,
"grad_norm": 0.1186257004737854,
"learning_rate": 3.722790849447717e-05,
"loss": 0.3231,
"step": 484
},
{
"epoch": 2.85,
"grad_norm": 0.1168690025806427,
"learning_rate": 3.6859200316544765e-05,
"loss": 0.3201,
"step": 485
},
{
"epoch": 2.85,
"grad_norm": 0.1150059625506401,
"learning_rate": 3.64919141171104e-05,
"loss": 0.3171,
"step": 486
},
{
"epoch": 2.86,
"grad_norm": 0.1203169897198677,
"learning_rate": 3.612605816773674e-05,
"loss": 0.3447,
"step": 487
},
{
"epoch": 2.86,
"grad_norm": 0.1196681559085846,
"learning_rate": 3.576164070777611e-05,
"loss": 0.3362,
"step": 488
},
{
"epoch": 2.87,
"grad_norm": 0.12342015653848648,
"learning_rate": 3.5398669944184894e-05,
"loss": 0.3434,
"step": 489
},
{
"epoch": 2.88,
"grad_norm": 0.1177283450961113,
"learning_rate": 3.503715405133888e-05,
"loss": 0.3319,
"step": 490
},
{
"epoch": 2.88,
"grad_norm": 0.12056996673345566,
"learning_rate": 3.467710117084897e-05,
"loss": 0.3342,
"step": 491
},
{
"epoch": 2.89,
"grad_norm": 0.12151093780994415,
"learning_rate": 3.4318519411378e-05,
"loss": 0.343,
"step": 492
},
{
"epoch": 2.89,
"grad_norm": 0.12013455480337143,
"learning_rate": 3.396141684845807e-05,
"loss": 0.342,
"step": 493
},
{
"epoch": 2.9,
"grad_norm": 0.11809618771076202,
"learning_rate": 3.3605801524308535e-05,
"loss": 0.331,
"step": 494
},
{
"epoch": 2.91,
"grad_norm": 0.11887520551681519,
"learning_rate": 3.3251681447655144e-05,
"loss": 0.3468,
"step": 495
},
{
"epoch": 2.91,
"grad_norm": 0.11792317032814026,
"learning_rate": 3.289906459354948e-05,
"loss": 0.3276,
"step": 496
},
{
"epoch": 2.92,
"grad_norm": 0.11810489743947983,
"learning_rate": 3.254795890318935e-05,
"loss": 0.3376,
"step": 497
},
{
"epoch": 2.92,
"grad_norm": 0.12175795435905457,
"learning_rate": 3.219837228374018e-05,
"loss": 0.3403,
"step": 498
},
{
"epoch": 2.93,
"grad_norm": 0.11994338035583496,
"learning_rate": 3.185031260815659e-05,
"loss": 0.3252,
"step": 499
},
{
"epoch": 2.94,
"grad_norm": 0.11685537546873093,
"learning_rate": 3.150378771500542e-05,
"loss": 0.3342,
"step": 500
},
{
"epoch": 2.94,
"grad_norm": 0.11831715703010559,
"learning_rate": 3.1158805408288994e-05,
"loss": 0.3327,
"step": 501
},
{
"epoch": 2.95,
"grad_norm": 0.121400386095047,
"learning_rate": 3.081537345726936e-05,
"loss": 0.3522,
"step": 502
},
{
"epoch": 2.95,
"grad_norm": 0.11787579953670502,
"learning_rate": 3.047349959629352e-05,
"loss": 0.3322,
"step": 503
},
{
"epoch": 2.96,
"grad_norm": 0.12182048708200455,
"learning_rate": 3.0133191524618953e-05,
"loss": 0.3371,
"step": 504
},
{
"epoch": 2.96,
"eval_loss": 0.49804943799972534,
"eval_runtime": 21.4624,
"eval_samples_per_second": 46.267,
"eval_steps_per_second": 11.602,
"step": 504
},
{
"epoch": 2.97,
"grad_norm": 0.12010974436998367,
"learning_rate": 2.979445690624051e-05,
"loss": 0.3334,
"step": 505
},
{
"epoch": 2.97,
"grad_norm": 0.12134676426649094,
"learning_rate": 2.945730336971767e-05,
"loss": 0.3348,
"step": 506
},
{
"epoch": 2.98,
"grad_norm": 0.12308987230062485,
"learning_rate": 2.912173850800267e-05,
"loss": 0.3542,
"step": 507
},
{
"epoch": 2.98,
"grad_norm": 0.11681053042411804,
"learning_rate": 2.8787769878269667e-05,
"loss": 0.3355,
"step": 508
},
{
"epoch": 2.99,
"grad_norm": 0.11922267824411392,
"learning_rate": 2.8455405001744396e-05,
"loss": 0.3526,
"step": 509
},
{
"epoch": 3.0,
"grad_norm": 0.12132036685943604,
"learning_rate": 2.8124651363534938e-05,
"loss": 0.3531,
"step": 510
},
{
"epoch": 3.0,
"grad_norm": 0.12195932865142822,
"learning_rate": 2.7795516412463075e-05,
"loss": 0.3445,
"step": 511
},
{
"epoch": 3.01,
"grad_norm": 0.11576711386442184,
"learning_rate": 2.7468007560896437e-05,
"loss": 0.3432,
"step": 512
},
{
"epoch": 3.01,
"grad_norm": 0.1186043843626976,
"learning_rate": 2.714213218458178e-05,
"loss": 0.3375,
"step": 513
},
{
"epoch": 3.0,
"grad_norm": 0.12182705849409103,
"learning_rate": 2.681789762247864e-05,
"loss": 0.3116,
"step": 514
},
{
"epoch": 3.01,
"grad_norm": 0.12121383845806122,
"learning_rate": 2.6495311176594284e-05,
"loss": 0.3087,
"step": 515
},
{
"epoch": 3.02,
"grad_norm": 0.12201498448848724,
"learning_rate": 2.617438011181914e-05,
"loss": 0.3092,
"step": 516
},
{
"epoch": 3.02,
"grad_norm": 0.11646965146064758,
"learning_rate": 2.5855111655763132e-05,
"loss": 0.3042,
"step": 517
},
{
"epoch": 3.03,
"grad_norm": 0.1196390837430954,
"learning_rate": 2.553751299859308e-05,
"loss": 0.3018,
"step": 518
},
{
"epoch": 3.03,
"grad_norm": 0.1267251968383789,
"learning_rate": 2.5221591292870594e-05,
"loss": 0.2991,
"step": 519
},
{
"epoch": 3.04,
"grad_norm": 0.13034097850322723,
"learning_rate": 2.4907353653391064e-05,
"loss": 0.3078,
"step": 520
},
{
"epoch": 3.05,
"grad_norm": 0.131874218583107,
"learning_rate": 2.4594807157023526e-05,
"loss": 0.3167,
"step": 521
},
{
"epoch": 3.05,
"grad_norm": 0.1304672360420227,
"learning_rate": 2.428395884255109e-05,
"loss": 0.3209,
"step": 522
},
{
"epoch": 3.06,
"grad_norm": 0.13389524817466736,
"learning_rate": 2.39748157105126e-05,
"loss": 0.3262,
"step": 523
},
{
"epoch": 3.06,
"grad_norm": 0.12869839370250702,
"learning_rate": 2.3667384723044918e-05,
"loss": 0.3109,
"step": 524
},
{
"epoch": 3.07,
"grad_norm": 0.12486681342124939,
"learning_rate": 2.3361672803726e-05,
"loss": 0.3115,
"step": 525
},
{
"epoch": 3.08,
"grad_norm": 0.12838812172412872,
"learning_rate": 2.3057686837419245e-05,
"loss": 0.3186,
"step": 526
},
{
"epoch": 3.08,
"grad_norm": 0.12253987044095993,
"learning_rate": 2.2755433670118152e-05,
"loss": 0.3074,
"step": 527
},
{
"epoch": 3.09,
"grad_norm": 0.12514927983283997,
"learning_rate": 2.2454920108792355e-05,
"loss": 0.307,
"step": 528
},
{
"epoch": 3.09,
"grad_norm": 0.12394868582487106,
"learning_rate": 2.215615292123425e-05,
"loss": 0.3205,
"step": 529
},
{
"epoch": 3.1,
"grad_norm": 0.12297198921442032,
"learning_rate": 2.1859138835906556e-05,
"loss": 0.3065,
"step": 530
},
{
"epoch": 3.11,
"grad_norm": 0.12439598888158798,
"learning_rate": 2.1563884541790745e-05,
"loss": 0.3248,
"step": 531
},
{
"epoch": 3.11,
"grad_norm": 0.12265962362289429,
"learning_rate": 2.1270396688236592e-05,
"loss": 0.3142,
"step": 532
},
{
"epoch": 3.12,
"grad_norm": 0.1257302612066269,
"learning_rate": 2.097868188481217e-05,
"loss": 0.312,
"step": 533
},
{
"epoch": 3.12,
"grad_norm": 0.12314291298389435,
"learning_rate": 2.068874670115524e-05,
"loss": 0.3184,
"step": 534
},
{
"epoch": 3.13,
"grad_norm": 0.12145701795816422,
"learning_rate": 2.040059766682504e-05,
"loss": 0.2959,
"step": 535
},
{
"epoch": 3.14,
"grad_norm": 0.12181348353624344,
"learning_rate": 2.011424127115552e-05,
"loss": 0.3024,
"step": 536
},
{
"epoch": 3.14,
"grad_norm": 0.12109740078449249,
"learning_rate": 1.982968396310899e-05,
"loss": 0.2881,
"step": 537
},
{
"epoch": 3.15,
"grad_norm": 0.12610453367233276,
"learning_rate": 1.9546932151130913e-05,
"loss": 0.2981,
"step": 538
},
{
"epoch": 3.15,
"grad_norm": 0.12419940531253815,
"learning_rate": 1.926599220300569e-05,
"loss": 0.3119,
"step": 539
},
{
"epoch": 3.16,
"grad_norm": 0.12367010116577148,
"learning_rate": 1.898687044571311e-05,
"loss": 0.3074,
"step": 540
},
{
"epoch": 3.16,
"grad_norm": 0.12828263640403748,
"learning_rate": 1.8709573165286e-05,
"loss": 0.3132,
"step": 541
},
{
"epoch": 3.17,
"grad_norm": 0.12923619151115417,
"learning_rate": 1.843410660666852e-05,
"loss": 0.3047,
"step": 542
},
{
"epoch": 3.18,
"grad_norm": 0.12658795714378357,
"learning_rate": 1.8160476973575625e-05,
"loss": 0.3205,
"step": 543
},
{
"epoch": 3.18,
"grad_norm": 0.1237001121044159,
"learning_rate": 1.788869042835335e-05,
"loss": 0.3083,
"step": 544
},
{
"epoch": 3.19,
"grad_norm": 0.12831470370292664,
"learning_rate": 1.7618753091839923e-05,
"loss": 0.3064,
"step": 545
},
{
"epoch": 3.19,
"grad_norm": 0.12573179602622986,
"learning_rate": 1.7350671043228073e-05,
"loss": 0.3243,
"step": 546
},
{
"epoch": 3.19,
"eval_loss": 0.5050841569900513,
"eval_runtime": 21.4598,
"eval_samples_per_second": 46.273,
"eval_steps_per_second": 11.603,
"step": 546
},
{
"epoch": 3.2,
"grad_norm": 0.12148155272006989,
"learning_rate": 1.7084450319928036e-05,
"loss": 0.3205,
"step": 547
},
{
"epoch": 3.21,
"grad_norm": 0.12447548657655716,
"learning_rate": 1.6820096917431527e-05,
"loss": 0.3002,
"step": 548
},
{
"epoch": 3.21,
"grad_norm": 0.12423041462898254,
"learning_rate": 1.6557616789176846e-05,
"loss": 0.3261,
"step": 549
},
{
"epoch": 3.22,
"grad_norm": 0.12225422263145447,
"learning_rate": 1.6297015846414753e-05,
"loss": 0.2997,
"step": 550
},
{
"epoch": 3.22,
"grad_norm": 0.12243395298719406,
"learning_rate": 1.6038299958075266e-05,
"loss": 0.3004,
"step": 551
},
{
"epoch": 3.23,
"grad_norm": 0.12334055453538895,
"learning_rate": 1.5781474950635634e-05,
"loss": 0.2945,
"step": 552
},
{
"epoch": 3.24,
"grad_norm": 0.12340861558914185,
"learning_rate": 1.552654660798899e-05,
"loss": 0.3107,
"step": 553
},
{
"epoch": 3.24,
"grad_norm": 0.12131118774414062,
"learning_rate": 1.5273520671314113e-05,
"loss": 0.2888,
"step": 554
},
{
"epoch": 3.25,
"grad_norm": 0.12344761192798615,
"learning_rate": 1.5022402838946258e-05,
"loss": 0.3123,
"step": 555
},
{
"epoch": 3.25,
"grad_norm": 0.1286107301712036,
"learning_rate": 1.477319876624864e-05,
"loss": 0.3081,
"step": 556
},
{
"epoch": 3.26,
"grad_norm": 0.12453716993331909,
"learning_rate": 1.4525914065485224e-05,
"loss": 0.3063,
"step": 557
},
{
"epoch": 3.27,
"grad_norm": 0.1285639852285385,
"learning_rate": 1.4280554305694205e-05,
"loss": 0.3128,
"step": 558
},
{
"epoch": 3.27,
"grad_norm": 0.12868300080299377,
"learning_rate": 1.4037125012562701e-05,
"loss": 0.3173,
"step": 559
},
{
"epoch": 3.28,
"grad_norm": 0.1282641887664795,
"learning_rate": 1.37956316683023e-05,
"loss": 0.3149,
"step": 560
},
{
"epoch": 3.28,
"grad_norm": 0.12916123867034912,
"learning_rate": 1.3556079711525438e-05,
"loss": 0.3239,
"step": 561
},
{
"epoch": 3.29,
"grad_norm": 0.12533286213874817,
"learning_rate": 1.3318474537123138e-05,
"loss": 0.3054,
"step": 562
},
{
"epoch": 3.3,
"grad_norm": 0.12642714381217957,
"learning_rate": 1.3082821496143427e-05,
"loss": 0.3122,
"step": 563
},
{
"epoch": 3.3,
"grad_norm": 0.1210436299443245,
"learning_rate": 1.2849125895670733e-05,
"loss": 0.3106,
"step": 564
},
{
"epoch": 3.31,
"grad_norm": 0.12119234353303909,
"learning_rate": 1.26173929987065e-05,
"loss": 0.2944,
"step": 565
},
{
"epoch": 3.31,
"grad_norm": 0.12051783502101898,
"learning_rate": 1.2387628024050557e-05,
"loss": 0.2886,
"step": 566
},
{
"epoch": 3.32,
"grad_norm": 0.129972904920578,
"learning_rate": 1.2159836146183689e-05,
"loss": 0.3091,
"step": 567
},
{
"epoch": 3.32,
"grad_norm": 0.12776021659374237,
"learning_rate": 1.1934022495151064e-05,
"loss": 0.3034,
"step": 568
},
{
"epoch": 3.33,
"grad_norm": 0.12539061903953552,
"learning_rate": 1.171019215644662e-05,
"loss": 0.3092,
"step": 569
},
{
"epoch": 3.34,
"grad_norm": 0.1265249252319336,
"learning_rate": 1.1488350170898677e-05,
"loss": 0.3104,
"step": 570
},
{
"epoch": 3.34,
"grad_norm": 0.1216360554099083,
"learning_rate": 1.1268501534556242e-05,
"loss": 0.2941,
"step": 571
},
{
"epoch": 3.35,
"grad_norm": 0.12475910782814026,
"learning_rate": 1.1050651198576712e-05,
"loss": 0.3157,
"step": 572
},
{
"epoch": 3.35,
"grad_norm": 0.12245628982782364,
"learning_rate": 1.083480406911418e-05,
"loss": 0.3163,
"step": 573
},
{
"epoch": 3.36,
"grad_norm": 0.1258774697780609,
"learning_rate": 1.0620965007208994e-05,
"loss": 0.3106,
"step": 574
},
{
"epoch": 3.37,
"grad_norm": 0.1269146353006363,
"learning_rate": 1.040913882867839e-05,
"loss": 0.2964,
"step": 575
},
{
"epoch": 3.37,
"grad_norm": 0.12572172284126282,
"learning_rate": 1.019933030400786e-05,
"loss": 0.2986,
"step": 576
},
{
"epoch": 3.38,
"grad_norm": 0.12371216714382172,
"learning_rate": 9.991544158243848e-06,
"loss": 0.3162,
"step": 577
},
{
"epoch": 3.38,
"grad_norm": 0.12126260250806808,
"learning_rate": 9.78578507088731e-06,
"loss": 0.292,
"step": 578
},
{
"epoch": 3.39,
"grad_norm": 0.12398073077201843,
"learning_rate": 9.58205767578827e-06,
"loss": 0.3239,
"step": 579
},
{
"epoch": 3.4,
"grad_norm": 0.12346088886260986,
"learning_rate": 9.380366561041554e-06,
"loss": 0.3082,
"step": 580
},
{
"epoch": 3.4,
"grad_norm": 0.12363547831773758,
"learning_rate": 9.180716268883427e-06,
"loss": 0.2919,
"step": 581
},
{
"epoch": 3.41,
"grad_norm": 0.1286052167415619,
"learning_rate": 8.983111295589219e-06,
"loss": 0.3144,
"step": 582
},
{
"epoch": 3.41,
"grad_norm": 0.12536774575710297,
"learning_rate": 8.787556091372207e-06,
"loss": 0.313,
"step": 583
},
{
"epoch": 3.42,
"grad_norm": 0.1248733252286911,
"learning_rate": 8.594055060283269e-06,
"loss": 0.3002,
"step": 584
},
{
"epoch": 3.43,
"grad_norm": 0.12588725984096527,
"learning_rate": 8.402612560111767e-06,
"loss": 0.299,
"step": 585
},
{
"epoch": 3.43,
"grad_norm": 0.12710027396678925,
"learning_rate": 8.213232902287438e-06,
"loss": 0.3013,
"step": 586
},
{
"epoch": 3.44,
"grad_norm": 0.12748101353645325,
"learning_rate": 8.02592035178319e-06,
"loss": 0.3182,
"step": 587
},
{
"epoch": 3.44,
"grad_norm": 0.12470083683729172,
"learning_rate": 7.840679127019123e-06,
"loss": 0.3184,
"step": 588
},
{
"epoch": 3.44,
"eval_loss": 0.5052381157875061,
"eval_runtime": 21.4577,
"eval_samples_per_second": 46.277,
"eval_steps_per_second": 11.604,
"step": 588
},
{
"epoch": 3.45,
"grad_norm": 0.1215968132019043,
"learning_rate": 7.657513399767524e-06,
"loss": 0.2983,
"step": 589
},
{
"epoch": 3.46,
"grad_norm": 0.1248321607708931,
"learning_rate": 7.4764272950589185e-06,
"loss": 0.3151,
"step": 590
},
{
"epoch": 3.46,
"grad_norm": 0.12586048245429993,
"learning_rate": 7.297424891089189e-06,
"loss": 0.3088,
"step": 591
},
{
"epoch": 3.47,
"grad_norm": 0.12491127103567123,
"learning_rate": 7.120510219127619e-06,
"loss": 0.3057,
"step": 592
},
{
"epoch": 3.47,
"grad_norm": 0.1283918023109436,
"learning_rate": 6.9456872634262595e-06,
"loss": 0.3125,
"step": 593
},
{
"epoch": 3.48,
"grad_norm": 0.1247558444738388,
"learning_rate": 6.772959961130154e-06,
"loss": 0.2991,
"step": 594
},
{
"epoch": 3.49,
"grad_norm": 0.12368308752775192,
"learning_rate": 6.602332202188544e-06,
"loss": 0.3005,
"step": 595
},
{
"epoch": 3.49,
"grad_norm": 0.1240944191813469,
"learning_rate": 6.433807829267491e-06,
"loss": 0.2999,
"step": 596
},
{
"epoch": 3.5,
"grad_norm": 0.12441360950469971,
"learning_rate": 6.267390637663107e-06,
"loss": 0.298,
"step": 597
},
{
"epoch": 3.5,
"grad_norm": 0.12772201001644135,
"learning_rate": 6.103084375216273e-06,
"loss": 0.3101,
"step": 598
},
{
"epoch": 3.51,
"grad_norm": 0.13060550391674042,
"learning_rate": 5.9408927422281105e-06,
"loss": 0.3217,
"step": 599
},
{
"epoch": 3.51,
"grad_norm": 0.12049520760774612,
"learning_rate": 5.780819391376679e-06,
"loss": 0.2974,
"step": 600
},
{
"epoch": 3.52,
"grad_norm": 0.1235821545124054,
"learning_rate": 5.6228679276347676e-06,
"loss": 0.2888,
"step": 601
},
{
"epoch": 3.53,
"grad_norm": 0.1232110783457756,
"learning_rate": 5.467041908188608e-06,
"loss": 0.2978,
"step": 602
},
{
"epoch": 3.53,
"grad_norm": 0.1191987544298172,
"learning_rate": 5.313344842357859e-06,
"loss": 0.2881,
"step": 603
},
{
"epoch": 3.54,
"grad_norm": 0.12539394199848175,
"learning_rate": 5.161780191516552e-06,
"loss": 0.3133,
"step": 604
},
{
"epoch": 3.54,
"grad_norm": 0.13378071784973145,
"learning_rate": 5.012351369015067e-06,
"loss": 0.3184,
"step": 605
},
{
"epoch": 3.55,
"grad_norm": 0.12901347875595093,
"learning_rate": 4.865061740103361e-06,
"loss": 0.328,
"step": 606
},
{
"epoch": 3.56,
"grad_norm": 0.1196274608373642,
"learning_rate": 4.719914621855137e-06,
"loss": 0.2952,
"step": 607
},
{
"epoch": 3.56,
"grad_norm": 0.12590841948986053,
"learning_rate": 4.576913283093098e-06,
"loss": 0.3127,
"step": 608
},
{
"epoch": 3.57,
"grad_norm": 0.12549765408039093,
"learning_rate": 4.436060944315424e-06,
"loss": 0.3111,
"step": 609
},
{
"epoch": 3.57,
"grad_norm": 0.12531504034996033,
"learning_rate": 4.2973607776231605e-06,
"loss": 0.2965,
"step": 610
},
{
"epoch": 3.58,
"grad_norm": 0.1245151236653328,
"learning_rate": 4.160815906648796e-06,
"loss": 0.3076,
"step": 611
},
{
"epoch": 3.59,
"grad_norm": 0.12527605891227722,
"learning_rate": 4.026429406485988e-06,
"loss": 0.3064,
"step": 612
},
{
"epoch": 3.59,
"grad_norm": 0.12496983259916306,
"learning_rate": 3.894204303620197e-06,
"loss": 0.3106,
"step": 613
},
{
"epoch": 3.6,
"grad_norm": 0.1272316575050354,
"learning_rate": 3.764143575860635e-06,
"loss": 0.3146,
"step": 614
},
{
"epoch": 3.6,
"grad_norm": 0.12942712008953094,
"learning_rate": 3.6362501522730794e-06,
"loss": 0.3135,
"step": 615
},
{
"epoch": 3.61,
"grad_norm": 0.1277886927127838,
"learning_rate": 3.510526913114065e-06,
"loss": 0.3256,
"step": 616
},
{
"epoch": 3.62,
"grad_norm": 0.1255805343389511,
"learning_rate": 3.386976689765875e-06,
"loss": 0.3088,
"step": 617
},
{
"epoch": 3.62,
"grad_norm": 0.1285783350467682,
"learning_rate": 3.2656022646728625e-06,
"loss": 0.3149,
"step": 618
},
{
"epoch": 3.63,
"grad_norm": 0.1257854402065277,
"learning_rate": 3.146406371278754e-06,
"loss": 0.3093,
"step": 619
},
{
"epoch": 3.63,
"grad_norm": 0.12595035135746002,
"learning_rate": 3.029391693965089e-06,
"loss": 0.314,
"step": 620
},
{
"epoch": 3.64,
"grad_norm": 0.1260482221841812,
"learning_rate": 2.9145608679907897e-06,
"loss": 0.3074,
"step": 621
},
{
"epoch": 3.65,
"grad_norm": 0.12459404766559601,
"learning_rate": 2.8019164794327758e-06,
"loss": 0.3081,
"step": 622
},
{
"epoch": 3.65,
"grad_norm": 0.1228998526930809,
"learning_rate": 2.691461065127743e-06,
"loss": 0.2969,
"step": 623
},
{
"epoch": 3.66,
"grad_norm": 0.12393338978290558,
"learning_rate": 2.5831971126150766e-06,
"loss": 0.3037,
"step": 624
},
{
"epoch": 3.66,
"grad_norm": 0.1262238472700119,
"learning_rate": 2.4771270600807526e-06,
"loss": 0.3131,
"step": 625
},
{
"epoch": 3.67,
"grad_norm": 0.12439202517271042,
"learning_rate": 2.373253296302447e-06,
"loss": 0.2991,
"step": 626
},
{
"epoch": 3.68,
"grad_norm": 0.12303753942251205,
"learning_rate": 2.2715781605957885e-06,
"loss": 0.2966,
"step": 627
},
{
"epoch": 3.68,
"grad_norm": 0.12966549396514893,
"learning_rate": 2.1721039427616165e-06,
"loss": 0.3214,
"step": 628
},
{
"epoch": 3.69,
"grad_norm": 0.12504729628562927,
"learning_rate": 2.074832883034461e-06,
"loss": 0.2974,
"step": 629
},
{
"epoch": 3.69,
"grad_norm": 0.12392708659172058,
"learning_rate": 1.9797671720320543e-06,
"loss": 0.313,
"step": 630
},
{
"epoch": 3.69,
"eval_loss": 0.5060350298881531,
"eval_runtime": 21.4559,
"eval_samples_per_second": 46.281,
"eval_steps_per_second": 11.605,
"step": 630
},
{
"epoch": 3.7,
"grad_norm": 0.12215234339237213,
"learning_rate": 1.886908950706001e-06,
"loss": 0.2967,
"step": 631
},
{
"epoch": 3.7,
"grad_norm": 0.12799015641212463,
"learning_rate": 1.7962603102935849e-06,
"loss": 0.3234,
"step": 632
},
{
"epoch": 3.71,
"grad_norm": 0.12673823535442352,
"learning_rate": 1.7078232922706495e-06,
"loss": 0.3045,
"step": 633
},
{
"epoch": 3.72,
"grad_norm": 0.12385805696249008,
"learning_rate": 1.621599888305636e-06,
"loss": 0.3061,
"step": 634
},
{
"epoch": 3.72,
"grad_norm": 0.12683473527431488,
"learning_rate": 1.5375920402147303e-06,
"loss": 0.3212,
"step": 635
},
{
"epoch": 3.73,
"grad_norm": 0.12573249638080597,
"learning_rate": 1.4558016399181086e-06,
"loss": 0.3002,
"step": 636
},
{
"epoch": 3.73,
"grad_norm": 0.12660767138004303,
"learning_rate": 1.376230529397371e-06,
"loss": 0.3361,
"step": 637
},
{
"epoch": 3.74,
"grad_norm": 0.12542307376861572,
"learning_rate": 1.2988805006540316e-06,
"loss": 0.3037,
"step": 638
},
{
"epoch": 3.75,
"grad_norm": 0.1290496587753296,
"learning_rate": 1.22375329566915e-06,
"loss": 0.3208,
"step": 639
},
{
"epoch": 3.75,
"grad_norm": 0.12921449542045593,
"learning_rate": 1.1508506063641177e-06,
"loss": 0.3081,
"step": 640
},
{
"epoch": 3.76,
"grad_norm": 0.1243986189365387,
"learning_rate": 1.0801740745625678e-06,
"loss": 0.3094,
"step": 641
},
{
"epoch": 3.76,
"grad_norm": 0.12052708119153976,
"learning_rate": 1.0117252919533804e-06,
"loss": 0.3,
"step": 642
},
{
"epoch": 3.77,
"grad_norm": 0.12317415326833725,
"learning_rate": 9.455058000548467e-07,
"loss": 0.294,
"step": 643
},
{
"epoch": 3.78,
"grad_norm": 0.12568329274654388,
"learning_rate": 8.815170901799174e-07,
"loss": 0.3146,
"step": 644
},
{
"epoch": 3.78,
"grad_norm": 0.1256173700094223,
"learning_rate": 8.19760603402675e-07,
"loss": 0.3323,
"step": 645
},
{
"epoch": 3.79,
"grad_norm": 0.1262623518705368,
"learning_rate": 7.602377305258479e-07,
"loss": 0.3148,
"step": 646
},
{
"epoch": 3.79,
"grad_norm": 0.12715977430343628,
"learning_rate": 7.029498120494915e-07,
"loss": 0.322,
"step": 647
},
{
"epoch": 3.8,
"grad_norm": 0.12474533915519714,
"learning_rate": 6.478981381408011e-07,
"loss": 0.3114,
"step": 648
},
{
"epoch": 3.81,
"grad_norm": 0.12636886537075043,
"learning_rate": 5.950839486050464e-07,
"loss": 0.3168,
"step": 649
},
{
"epoch": 3.81,
"grad_norm": 0.165486678481102,
"learning_rate": 5.445084328576711e-07,
"loss": 0.3021,
"step": 650
},
{
"epoch": 3.82,
"grad_norm": 0.12697450816631317,
"learning_rate": 4.961727298974817e-07,
"loss": 0.3034,
"step": 651
},
{
"epoch": 3.82,
"grad_norm": 0.12721647322177887,
"learning_rate": 4.50077928281023e-07,
"loss": 0.3184,
"step": 652
},
{
"epoch": 3.83,
"grad_norm": 0.12573368847370148,
"learning_rate": 4.062250660980427e-07,
"loss": 0.2971,
"step": 653
},
{
"epoch": 3.84,
"grad_norm": 0.12567584216594696,
"learning_rate": 3.646151309481094e-07,
"loss": 0.3134,
"step": 654
},
{
"epoch": 3.84,
"grad_norm": 0.12959103286266327,
"learning_rate": 3.252490599183977e-07,
"loss": 0.3135,
"step": 655
},
{
"epoch": 3.85,
"grad_norm": 0.12764880061149597,
"learning_rate": 2.8812773956256035e-07,
"loss": 0.3023,
"step": 656
},
{
"epoch": 3.85,
"grad_norm": 0.12387152761220932,
"learning_rate": 2.5325200588076634e-07,
"loss": 0.3104,
"step": 657
},
{
"epoch": 3.86,
"grad_norm": 0.12274641543626785,
"learning_rate": 2.2062264430087188e-07,
"loss": 0.3013,
"step": 658
},
{
"epoch": 3.86,
"grad_norm": 0.12433410435914993,
"learning_rate": 1.902403896607563e-07,
"loss": 0.3065,
"step": 659
},
{
"epoch": 3.87,
"grad_norm": 0.12338897585868835,
"learning_rate": 1.621059261917357e-07,
"loss": 0.2987,
"step": 660
},
{
"epoch": 3.88,
"grad_norm": 0.12342184036970139,
"learning_rate": 1.3621988750317505e-07,
"loss": 0.2953,
"step": 661
},
{
"epoch": 3.88,
"grad_norm": 0.12608054280281067,
"learning_rate": 1.1258285656822188e-07,
"loss": 0.2911,
"step": 662
},
{
"epoch": 3.89,
"grad_norm": 0.13148155808448792,
"learning_rate": 9.119536571066123e-08,
"loss": 0.328,
"step": 663
},
{
"epoch": 3.89,
"grad_norm": 0.12618465721607208,
"learning_rate": 7.205789659294748e-08,
"loss": 0.3065,
"step": 664
},
{
"epoch": 3.9,
"grad_norm": 0.12973222136497498,
"learning_rate": 5.517088020533523e-08,
"loss": 0.3028,
"step": 665
},
{
"epoch": 3.91,
"grad_norm": 0.12376265227794647,
"learning_rate": 4.053469685617595e-08,
"loss": 0.2912,
"step": 666
},
{
"epoch": 3.91,
"grad_norm": 0.12581373751163483,
"learning_rate": 2.814967616339148e-08,
"loss": 0.2984,
"step": 667
},
{
"epoch": 3.92,
"grad_norm": 0.12564615905284882,
"learning_rate": 1.8016097047002244e-08,
"loss": 0.3115,
"step": 668
},
{
"epoch": 3.92,
"grad_norm": 0.12530925869941711,
"learning_rate": 1.013418772285446e-08,
"loss": 0.304,
"step": 669
},
{
"epoch": 3.93,
"grad_norm": 0.13158272206783295,
"learning_rate": 4.504125697490924e-09,
"loss": 0.3125,
"step": 670
},
{
"epoch": 3.94,
"grad_norm": 0.12446756660938263,
"learning_rate": 1.1260377641764131e-09,
"loss": 0.3059,
"step": 671
},
{
"epoch": 3.94,
"grad_norm": 0.12879742681980133,
"learning_rate": 0.0,
"loss": 0.3097,
"step": 672
},
{
"epoch": 3.94,
"eval_loss": 0.505996823310852,
"eval_runtime": 21.4647,
"eval_samples_per_second": 46.262,
"eval_steps_per_second": 11.6,
"step": 672
}
],
"logging_steps": 1,
"max_steps": 672,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 168,
"total_flos": 2.0052760954184663e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}