{
"best_metric": 18.436363636363637,
"best_model_checkpoint": "galgame-whisper\\checkpoint-24500",
"epoch": 13.0,
"eval_steps": 500,
"global_step": 26624,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01220703125,
"grad_norm": 11.14741039276123,
"learning_rate": 4.600000000000001e-06,
"loss": 1.7805,
"step": 25
},
{
"epoch": 0.0244140625,
"grad_norm": 6.63244104385376,
"learning_rate": 9.600000000000001e-06,
"loss": 1.3099,
"step": 50
},
{
"epoch": 0.03662109375,
"grad_norm": 4.578396320343018,
"learning_rate": 1e-05,
"loss": 0.9171,
"step": 75
},
{
"epoch": 0.048828125,
"grad_norm": 5.572494029998779,
"learning_rate": 1e-05,
"loss": 0.6639,
"step": 100
},
{
"epoch": 0.048828125,
"eval_cer": 25.6,
"eval_loss": 0.593059778213501,
"eval_normalized_cer": 20.427455815865187,
"eval_runtime": 123.1903,
"eval_samples_per_second": 1.039,
"eval_steps_per_second": 0.13,
"step": 100
},
{
"epoch": 0.06103515625,
"grad_norm": 5.427572250366211,
"learning_rate": 1e-05,
"loss": 0.6159,
"step": 125
},
{
"epoch": 0.0732421875,
"grad_norm": 4.667797565460205,
"learning_rate": 1e-05,
"loss": 0.5685,
"step": 150
},
{
"epoch": 0.08544921875,
"grad_norm": 4.875271797180176,
"learning_rate": 1e-05,
"loss": 0.5758,
"step": 175
},
{
"epoch": 0.09765625,
"grad_norm": 4.109724044799805,
"learning_rate": 1e-05,
"loss": 0.5772,
"step": 200
},
{
"epoch": 0.09765625,
"eval_cer": 25.418181818181818,
"eval_loss": 0.567274808883667,
"eval_normalized_cer": 20.34525277435265,
"eval_runtime": 140.399,
"eval_samples_per_second": 0.912,
"eval_steps_per_second": 0.114,
"step": 200
},
{
"epoch": 0.10986328125,
"grad_norm": 4.04423189163208,
"learning_rate": 1e-05,
"loss": 0.5723,
"step": 225
},
{
"epoch": 0.1220703125,
"grad_norm": 4.566165447235107,
"learning_rate": 1e-05,
"loss": 0.6474,
"step": 250
},
{
"epoch": 0.13427734375,
"grad_norm": 4.996761798858643,
"learning_rate": 1e-05,
"loss": 0.611,
"step": 275
},
{
"epoch": 0.146484375,
"grad_norm": 4.392452716827393,
"learning_rate": 1e-05,
"loss": 0.5462,
"step": 300
},
{
"epoch": 0.146484375,
"eval_cer": 22.21818181818182,
"eval_loss": 0.5370786190032959,
"eval_normalized_cer": 17.673653925195232,
"eval_runtime": 134.3037,
"eval_samples_per_second": 0.953,
"eval_steps_per_second": 0.119,
"step": 300
},
{
"epoch": 0.15869140625,
"grad_norm": 3.719304084777832,
"learning_rate": 1e-05,
"loss": 0.5986,
"step": 325
},
{
"epoch": 0.1708984375,
"grad_norm": 11.704819679260254,
"learning_rate": 1e-05,
"loss": 0.6941,
"step": 350
},
{
"epoch": 0.18310546875,
"grad_norm": 5.188851833343506,
"learning_rate": 1e-05,
"loss": 0.5486,
"step": 375
},
{
"epoch": 0.1953125,
"grad_norm": 3.8557848930358887,
"learning_rate": 1e-05,
"loss": 0.6085,
"step": 400
},
{
"epoch": 0.1953125,
"eval_cer": 21.854545454545455,
"eval_loss": 0.5271878838539124,
"eval_normalized_cer": 17.221537196876284,
"eval_runtime": 131.3514,
"eval_samples_per_second": 0.974,
"eval_steps_per_second": 0.122,
"step": 400
},
{
"epoch": 0.20751953125,
"grad_norm": 3.7432358264923096,
"learning_rate": 1e-05,
"loss": 0.6387,
"step": 425
},
{
"epoch": 0.2197265625,
"grad_norm": 3.6960973739624023,
"learning_rate": 1e-05,
"loss": 0.5481,
"step": 450
},
{
"epoch": 0.23193359375,
"grad_norm": 5.312159061431885,
"learning_rate": 1e-05,
"loss": 0.5565,
"step": 475
},
{
"epoch": 0.244140625,
"grad_norm": 5.376567363739014,
"learning_rate": 1e-05,
"loss": 0.5262,
"step": 500
},
{
"epoch": 0.244140625,
"eval_cer": 24.509090909090908,
"eval_loss": 0.5245115756988525,
"eval_normalized_cer": 19.482120838471022,
"eval_runtime": 136.6132,
"eval_samples_per_second": 0.937,
"eval_steps_per_second": 0.117,
"step": 500
},
{
"epoch": 0.25634765625,
"grad_norm": 3.251194715499878,
"learning_rate": 1e-05,
"loss": 0.5751,
"step": 525
},
{
"epoch": 0.2685546875,
"grad_norm": 3.575839042663574,
"learning_rate": 1e-05,
"loss": 0.5391,
"step": 550
},
{
"epoch": 0.28076171875,
"grad_norm": 4.032149314880371,
"learning_rate": 1e-05,
"loss": 0.5314,
"step": 575
},
{
"epoch": 0.29296875,
"grad_norm": 4.987039566040039,
"learning_rate": 1e-05,
"loss": 0.576,
"step": 600
},
{
"epoch": 0.29296875,
"eval_cer": 22.69090909090909,
"eval_loss": 0.5259984135627747,
"eval_normalized_cer": 18.207973695026716,
"eval_runtime": 127.1247,
"eval_samples_per_second": 1.007,
"eval_steps_per_second": 0.126,
"step": 600
},
{
"epoch": 0.30517578125,
"grad_norm": 4.042311191558838,
"learning_rate": 1e-05,
"loss": 0.5671,
"step": 625
},
{
"epoch": 0.3173828125,
"grad_norm": 5.254710674285889,
"learning_rate": 1e-05,
"loss": 0.5525,
"step": 650
},
{
"epoch": 0.32958984375,
"grad_norm": 3.7549564838409424,
"learning_rate": 1e-05,
"loss": 0.5489,
"step": 675
},
{
"epoch": 0.341796875,
"grad_norm": 4.815126419067383,
"learning_rate": 1e-05,
"loss": 0.5844,
"step": 700
},
{
"epoch": 0.341796875,
"eval_cer": 26.581818181818186,
"eval_loss": 0.5143883228302002,
"eval_normalized_cer": 21.98931360460337,
"eval_runtime": 125.4245,
"eval_samples_per_second": 1.021,
"eval_steps_per_second": 0.128,
"step": 700
},
{
"epoch": 0.35400390625,
"grad_norm": 3.240372657775879,
"learning_rate": 1e-05,
"loss": 0.5684,
"step": 725
},
{
"epoch": 0.3662109375,
"grad_norm": 4.83163595199585,
"learning_rate": 1e-05,
"loss": 0.5157,
"step": 750
},
{
"epoch": 0.37841796875,
"grad_norm": 3.209136724472046,
"learning_rate": 1e-05,
"loss": 0.5348,
"step": 775
},
{
"epoch": 0.390625,
"grad_norm": 3.9407966136932373,
"learning_rate": 1e-05,
"loss": 0.5689,
"step": 800
},
{
"epoch": 0.390625,
"eval_cer": 23.200000000000003,
"eval_loss": 0.5060851573944092,
"eval_normalized_cer": 18.372379778051787,
"eval_runtime": 132.0722,
"eval_samples_per_second": 0.969,
"eval_steps_per_second": 0.121,
"step": 800
},
{
"epoch": 0.40283203125,
"grad_norm": 5.201825141906738,
"learning_rate": 1e-05,
"loss": 0.5981,
"step": 825
},
{
"epoch": 0.4150390625,
"grad_norm": 4.420695781707764,
"learning_rate": 1e-05,
"loss": 0.6028,
"step": 850
},
{
"epoch": 0.42724609375,
"grad_norm": 4.9300856590271,
"learning_rate": 1e-05,
"loss": 0.5807,
"step": 875
},
{
"epoch": 0.439453125,
"grad_norm": 4.098893165588379,
"learning_rate": 1e-05,
"loss": 0.5731,
"step": 900
},
{
"epoch": 0.439453125,
"eval_cer": 22.0,
"eval_loss": 0.4978080987930298,
"eval_normalized_cer": 17.7147554459515,
"eval_runtime": 131.4521,
"eval_samples_per_second": 0.974,
"eval_steps_per_second": 0.122,
"step": 900
},
{
"epoch": 0.45166015625,
"grad_norm": 3.947633981704712,
"learning_rate": 1e-05,
"loss": 0.4836,
"step": 925
},
{
"epoch": 0.4638671875,
"grad_norm": 3.02842378616333,
"learning_rate": 1e-05,
"loss": 0.5248,
"step": 950
},
{
"epoch": 0.47607421875,
"grad_norm": 5.237096309661865,
"learning_rate": 1e-05,
"loss": 0.5299,
"step": 975
},
{
"epoch": 0.48828125,
"grad_norm": 4.260601997375488,
"learning_rate": 1e-05,
"loss": 0.5058,
"step": 1000
},
{
"epoch": 0.48828125,
"eval_cer": 23.599999999999998,
"eval_loss": 0.494682252407074,
"eval_normalized_cer": 18.988902589395806,
"eval_runtime": 132.3816,
"eval_samples_per_second": 0.967,
"eval_steps_per_second": 0.121,
"step": 1000
},
{
"epoch": 0.50048828125,
"grad_norm": 4.105225086212158,
"learning_rate": 1e-05,
"loss": 0.4992,
"step": 1025
},
{
"epoch": 0.5126953125,
"grad_norm": 4.636739730834961,
"learning_rate": 1e-05,
"loss": 0.5517,
"step": 1050
},
{
"epoch": 0.52490234375,
"grad_norm": 4.676263809204102,
"learning_rate": 1e-05,
"loss": 0.511,
"step": 1075
},
{
"epoch": 0.537109375,
"grad_norm": 4.2934465408325195,
"learning_rate": 1e-05,
"loss": 0.5556,
"step": 1100
},
{
"epoch": 0.537109375,
"eval_cer": 23.454545454545457,
"eval_loss": 0.4881322383880615,
"eval_normalized_cer": 19.399917796958487,
"eval_runtime": 148.1276,
"eval_samples_per_second": 0.864,
"eval_steps_per_second": 0.054,
"step": 1100
},
{
"epoch": 0.54931640625,
"grad_norm": 3.7905611991882324,
"learning_rate": 1e-05,
"loss": 0.5309,
"step": 1125
},
{
"epoch": 0.5615234375,
"grad_norm": 3.6019198894500732,
"learning_rate": 1e-05,
"loss": 0.4845,
"step": 1150
},
{
"epoch": 0.57373046875,
"grad_norm": 3.77414870262146,
"learning_rate": 1e-05,
"loss": 0.491,
"step": 1175
},
{
"epoch": 0.5859375,
"grad_norm": 3.13875412940979,
"learning_rate": 1e-05,
"loss": 0.5033,
"step": 1200
},
{
"epoch": 0.5859375,
"eval_cer": 23.81818181818182,
"eval_loss": 0.48237502574920654,
"eval_normalized_cer": 19.31771475544595,
"eval_runtime": 167.3993,
"eval_samples_per_second": 0.765,
"eval_steps_per_second": 0.048,
"step": 1200
},
{
"epoch": 0.59814453125,
"grad_norm": 5.719494819641113,
"learning_rate": 1e-05,
"loss": 0.5085,
"step": 1225
},
{
"epoch": 0.6103515625,
"grad_norm": 3.9417619705200195,
"learning_rate": 1e-05,
"loss": 0.4764,
"step": 1250
},
{
"epoch": 0.62255859375,
"grad_norm": 4.024901390075684,
"learning_rate": 1e-05,
"loss": 0.4834,
"step": 1275
},
{
"epoch": 0.634765625,
"grad_norm": 4.798065185546875,
"learning_rate": 1e-05,
"loss": 0.5041,
"step": 1300
},
{
"epoch": 0.634765625,
"eval_cer": 21.672727272727276,
"eval_loss": 0.47094789147377014,
"eval_normalized_cer": 17.632552404438965,
"eval_runtime": 167.9645,
"eval_samples_per_second": 0.762,
"eval_steps_per_second": 0.048,
"step": 1300
},
{
"epoch": 0.64697265625,
"grad_norm": 3.018202781677246,
"learning_rate": 1e-05,
"loss": 0.5181,
"step": 1325
},
{
"epoch": 0.6591796875,
"grad_norm": 4.9327311515808105,
"learning_rate": 1e-05,
"loss": 0.5419,
"step": 1350
},
{
"epoch": 0.67138671875,
"grad_norm": 4.631186485290527,
"learning_rate": 1e-05,
"loss": 0.4995,
"step": 1375
},
{
"epoch": 0.68359375,
"grad_norm": 5.249241828918457,
"learning_rate": 1e-05,
"loss": 0.5324,
"step": 1400
},
{
"epoch": 0.68359375,
"eval_cer": 21.01818181818182,
"eval_loss": 0.4812983274459839,
"eval_normalized_cer": 16.605014385532264,
"eval_runtime": 164.7775,
"eval_samples_per_second": 0.777,
"eval_steps_per_second": 0.049,
"step": 1400
},
{
"epoch": 0.69580078125,
"grad_norm": 3.599472761154175,
"learning_rate": 1e-05,
"loss": 0.5059,
"step": 1425
},
{
"epoch": 0.7080078125,
"grad_norm": 2.960740089416504,
"learning_rate": 1e-05,
"loss": 0.474,
"step": 1450
},
{
"epoch": 0.72021484375,
"grad_norm": 5.463324546813965,
"learning_rate": 1e-05,
"loss": 0.4646,
"step": 1475
},
{
"epoch": 0.732421875,
"grad_norm": 2.3991730213165283,
"learning_rate": 1e-05,
"loss": 0.5374,
"step": 1500
},
{
"epoch": 0.732421875,
"eval_cer": 21.30909090909091,
"eval_loss": 0.4700665771961212,
"eval_normalized_cer": 17.057131113851213,
"eval_runtime": 168.2533,
"eval_samples_per_second": 0.761,
"eval_steps_per_second": 0.048,
"step": 1500
},
{
"epoch": 0.74462890625,
"grad_norm": 3.7515509128570557,
"learning_rate": 1e-05,
"loss": 0.4979,
"step": 1525
},
{
"epoch": 0.7568359375,
"grad_norm": 3.6961734294891357,
"learning_rate": 1e-05,
"loss": 0.5254,
"step": 1550
},
{
"epoch": 0.76904296875,
"grad_norm": 5.233846187591553,
"learning_rate": 1e-05,
"loss": 0.5757,
"step": 1575
},
{
"epoch": 0.78125,
"grad_norm": 3.659038543701172,
"learning_rate": 1e-05,
"loss": 0.4875,
"step": 1600
},
{
"epoch": 0.78125,
"eval_cer": 20.872727272727275,
"eval_loss": 0.4664301574230194,
"eval_normalized_cer": 16.48170982326346,
"eval_runtime": 170.1628,
"eval_samples_per_second": 0.752,
"eval_steps_per_second": 0.047,
"step": 1600
},
{
"epoch": 0.79345703125,
"grad_norm": 3.4504241943359375,
"learning_rate": 1e-05,
"loss": 0.4794,
"step": 1625
},
{
"epoch": 0.8056640625,
"grad_norm": 3.2377774715423584,
"learning_rate": 1e-05,
"loss": 0.5338,
"step": 1650
},
{
"epoch": 0.81787109375,
"grad_norm": 3.6238794326782227,
"learning_rate": 1e-05,
"loss": 0.4747,
"step": 1675
},
{
"epoch": 0.830078125,
"grad_norm": 3.8324549198150635,
"learning_rate": 1e-05,
"loss": 0.5155,
"step": 1700
},
{
"epoch": 0.830078125,
"eval_cer": 22.254545454545454,
"eval_loss": 0.4597916305065155,
"eval_normalized_cer": 18.12577065351418,
"eval_runtime": 166.0402,
"eval_samples_per_second": 0.771,
"eval_steps_per_second": 0.048,
"step": 1700
},
{
"epoch": 0.84228515625,
"grad_norm": 4.575786113739014,
"learning_rate": 1e-05,
"loss": 0.515,
"step": 1725
},
{
"epoch": 0.8544921875,
"grad_norm": 3.636530637741089,
"learning_rate": 1e-05,
"loss": 0.4976,
"step": 1750
},
{
"epoch": 0.86669921875,
"grad_norm": 4.921797752380371,
"learning_rate": 1e-05,
"loss": 0.5211,
"step": 1775
},
{
"epoch": 0.87890625,
"grad_norm": 3.620969533920288,
"learning_rate": 1e-05,
"loss": 0.4824,
"step": 1800
},
{
"epoch": 0.87890625,
"eval_cer": 26.181818181818183,
"eval_loss": 0.46164897084236145,
"eval_normalized_cer": 21.537196876284423,
"eval_runtime": 150.2,
"eval_samples_per_second": 0.852,
"eval_steps_per_second": 0.053,
"step": 1800
},
{
"epoch": 0.89111328125,
"grad_norm": 4.432845592498779,
"learning_rate": 1e-05,
"loss": 0.5138,
"step": 1825
},
{
"epoch": 0.9033203125,
"grad_norm": 3.5764822959899902,
"learning_rate": 1e-05,
"loss": 0.5413,
"step": 1850
},
{
"epoch": 0.91552734375,
"grad_norm": 4.142552852630615,
"learning_rate": 1e-05,
"loss": 0.4824,
"step": 1875
},
{
"epoch": 0.927734375,
"grad_norm": 3.680915594100952,
"learning_rate": 1e-05,
"loss": 0.5295,
"step": 1900
},
{
"epoch": 0.927734375,
"eval_cer": 20.327272727272728,
"eval_loss": 0.45038798451423645,
"eval_normalized_cer": 15.906288532675708,
"eval_runtime": 173.3587,
"eval_samples_per_second": 0.738,
"eval_steps_per_second": 0.046,
"step": 1900
},
{
"epoch": 0.93994140625,
"grad_norm": 2.5720534324645996,
"learning_rate": 1e-05,
"loss": 0.5137,
"step": 1925
},
{
"epoch": 0.9521484375,
"grad_norm": 2.782604455947876,
"learning_rate": 1e-05,
"loss": 0.5086,
"step": 1950
},
{
"epoch": 0.96435546875,
"grad_norm": 4.596216201782227,
"learning_rate": 1e-05,
"loss": 0.525,
"step": 1975
},
{
"epoch": 0.9765625,
"grad_norm": 4.53118371963501,
"learning_rate": 1e-05,
"loss": 0.5004,
"step": 2000
},
{
"epoch": 0.9765625,
"eval_cer": 22.145454545454545,
"eval_loss": 0.4644533395767212,
"eval_normalized_cer": 17.26263871763255,
"eval_runtime": 170.2375,
"eval_samples_per_second": 0.752,
"eval_steps_per_second": 0.047,
"step": 2000
},
{
"epoch": 0.98876953125,
"grad_norm": 3.8377697467803955,
"learning_rate": 1e-05,
"loss": 0.5364,
"step": 2025
},
{
"epoch": 1.0009765625,
"grad_norm": 6.334020614624023,
"learning_rate": 1e-05,
"loss": 0.619,
"step": 2050
},
{
"epoch": 1.01318359375,
"grad_norm": 4.159861087799072,
"learning_rate": 1e-05,
"loss": 0.5219,
"step": 2075
},
{
"epoch": 1.025390625,
"grad_norm": 3.607752799987793,
"learning_rate": 1e-05,
"loss": 0.529,
"step": 2100
},
{
"epoch": 1.03759765625,
"grad_norm": 5.036830425262451,
"learning_rate": 1e-05,
"loss": 0.5207,
"step": 2125
},
{
"epoch": 1.0498046875,
"grad_norm": 3.520869016647339,
"learning_rate": 1e-05,
"loss": 0.5213,
"step": 2150
},
{
"epoch": 1.06201171875,
"grad_norm": 4.612700939178467,
"learning_rate": 1e-05,
"loss": 0.4954,
"step": 2175
},
{
"epoch": 1.07421875,
"grad_norm": 4.156740188598633,
"learning_rate": 1e-05,
"loss": 0.5406,
"step": 2200
},
{
"epoch": 1.08642578125,
"grad_norm": 3.5834848880767822,
"learning_rate": 1e-05,
"loss": 0.5097,
"step": 2225
},
{
"epoch": 1.0986328125,
"grad_norm": 3.4885971546173096,
"learning_rate": 1e-05,
"loss": 0.4756,
"step": 2250
},
{
"epoch": 1.11083984375,
"grad_norm": 5.202791213989258,
"learning_rate": 1e-05,
"loss": 0.5375,
"step": 2275
},
{
"epoch": 1.123046875,
"grad_norm": 2.762514114379883,
"learning_rate": 1e-05,
"loss": 0.4792,
"step": 2300
},
{
"epoch": 1.13525390625,
"grad_norm": 2.9590845108032227,
"learning_rate": 1e-05,
"loss": 0.4671,
"step": 2325
},
{
"epoch": 1.1474609375,
"grad_norm": 2.85101318359375,
"learning_rate": 1e-05,
"loss": 0.4856,
"step": 2350
},
{
"epoch": 1.15966796875,
"grad_norm": 3.444801092147827,
"learning_rate": 1e-05,
"loss": 0.5393,
"step": 2375
},
{
"epoch": 1.171875,
"grad_norm": 4.466598033905029,
"learning_rate": 1e-05,
"loss": 0.5304,
"step": 2400
},
{
"epoch": 1.18408203125,
"grad_norm": 3.2998430728912354,
"learning_rate": 1e-05,
"loss": 0.5008,
"step": 2425
},
{
"epoch": 1.1962890625,
"grad_norm": 3.405848264694214,
"learning_rate": 1e-05,
"loss": 0.4788,
"step": 2450
},
{
"epoch": 1.20849609375,
"grad_norm": 3.5294339656829834,
"learning_rate": 1e-05,
"loss": 0.4954,
"step": 2475
},
{
"epoch": 1.220703125,
"grad_norm": 3.5182113647460938,
"learning_rate": 1e-05,
"loss": 0.5109,
"step": 2500
},
{
"epoch": 1.220703125,
"eval_cer": 22.8,
"eval_loss": 0.449319064617157,
"eval_normalized_cer": 18.495684340320594,
"eval_runtime": 122.2523,
"eval_samples_per_second": 1.047,
"eval_steps_per_second": 0.065,
"step": 2500
},
{
"epoch": 1.23291015625,
"grad_norm": 4.339564800262451,
"learning_rate": 1e-05,
"loss": 0.5178,
"step": 2525
},
{
"epoch": 1.2451171875,
"grad_norm": 3.9555203914642334,
"learning_rate": 1e-05,
"loss": 0.4995,
"step": 2550
},
{
"epoch": 1.25732421875,
"grad_norm": 2.8713884353637695,
"learning_rate": 1e-05,
"loss": 0.4892,
"step": 2575
},
{
"epoch": 1.26953125,
"grad_norm": 3.2301995754241943,
"learning_rate": 1e-05,
"loss": 0.4916,
"step": 2600
},
{
"epoch": 1.28173828125,
"grad_norm": null,
"learning_rate": 1e-05,
"loss": 0.5373,
"step": 2625
},
{
"epoch": 1.2939453125,
"grad_norm": 3.3091487884521484,
"learning_rate": 1e-05,
"loss": 0.4517,
"step": 2650
},
{
"epoch": 1.30615234375,
"grad_norm": 5.547422409057617,
"learning_rate": 1e-05,
"loss": 0.5115,
"step": 2675
},
{
"epoch": 1.318359375,
"grad_norm": 4.931210517883301,
"learning_rate": 1e-05,
"loss": 0.4886,
"step": 2700
},
{
"epoch": 1.33056640625,
"grad_norm": 4.160281658172607,
"learning_rate": 1e-05,
"loss": 0.4653,
"step": 2725
},
{
"epoch": 1.3427734375,
"grad_norm": 3.172577381134033,
"learning_rate": 1e-05,
"loss": 0.5092,
"step": 2750
},
{
"epoch": 1.35498046875,
"grad_norm": 4.536301612854004,
"learning_rate": 1e-05,
"loss": 0.5189,
"step": 2775
},
{
"epoch": 1.3671875,
"grad_norm": 4.744750499725342,
"learning_rate": 1e-05,
"loss": 0.4882,
"step": 2800
},
{
"epoch": 1.37939453125,
"grad_norm": 4.030979633331299,
"learning_rate": 1e-05,
"loss": 0.4538,
"step": 2825
},
{
"epoch": 1.3916015625,
"grad_norm": 4.93550443649292,
"learning_rate": 1e-05,
"loss": 0.4778,
"step": 2850
},
{
"epoch": 1.40380859375,
"grad_norm": 3.9617207050323486,
"learning_rate": 1e-05,
"loss": 0.4542,
"step": 2875
},
{
"epoch": 1.416015625,
"grad_norm": 2.711639642715454,
"learning_rate": 1e-05,
"loss": 0.501,
"step": 2900
},
{
"epoch": 1.42822265625,
"grad_norm": 3.6887452602386475,
"learning_rate": 1e-05,
"loss": 0.4909,
"step": 2925
},
{
"epoch": 1.4404296875,
"grad_norm": 4.250792026519775,
"learning_rate": 1e-05,
"loss": 0.4814,
"step": 2950
},
{
"epoch": 1.45263671875,
"grad_norm": 3.804023265838623,
"learning_rate": 1e-05,
"loss": 0.4922,
"step": 2975
},
{
"epoch": 1.46484375,
"grad_norm": 4.579716205596924,
"learning_rate": 1e-05,
"loss": 0.4816,
"step": 3000
},
{
"epoch": 1.46484375,
"eval_cer": 21.163636363636364,
"eval_loss": 0.4456084370613098,
"eval_normalized_cer": 16.6872174270448,
"eval_runtime": 143.2684,
"eval_samples_per_second": 0.893,
"eval_steps_per_second": 0.056,
"step": 3000
},
{
"epoch": 1.47705078125,
"grad_norm": 6.303469657897949,
"learning_rate": 1e-05,
"loss": 0.5534,
"step": 3025
},
{
"epoch": 1.4892578125,
"grad_norm": 3.4602832794189453,
"learning_rate": 1e-05,
"loss": 0.5466,
"step": 3050
},
{
"epoch": 1.50146484375,
"grad_norm": 5.2441205978393555,
"learning_rate": 1e-05,
"loss": 0.4971,
"step": 3075
},
{
"epoch": 1.513671875,
"grad_norm": 4.630413055419922,
"learning_rate": 1e-05,
"loss": 0.4965,
"step": 3100
},
{
"epoch": 1.52587890625,
"grad_norm": 4.705837726593018,
"learning_rate": 1e-05,
"loss": 0.4555,
"step": 3125
},
{
"epoch": 1.5380859375,
"grad_norm": 5.804379463195801,
"learning_rate": 1e-05,
"loss": 0.5324,
"step": 3150
},
{
"epoch": 1.55029296875,
"grad_norm": 4.1124701499938965,
"learning_rate": 1e-05,
"loss": 0.4799,
"step": 3175
},
{
"epoch": 1.5625,
"grad_norm": 2.9505362510681152,
"learning_rate": 1e-05,
"loss": 0.4615,
"step": 3200
},
{
"epoch": 1.57470703125,
"grad_norm": 4.232094764709473,
"learning_rate": 1e-05,
"loss": 0.5032,
"step": 3225
},
{
"epoch": 1.5869140625,
"grad_norm": 3.584272861480713,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 3250
},
{
"epoch": 1.59912109375,
"grad_norm": 4.316075801849365,
"learning_rate": 1e-05,
"loss": 0.5184,
"step": 3275
},
{
"epoch": 1.611328125,
"grad_norm": 2.490178108215332,
"learning_rate": 1e-05,
"loss": 0.5072,
"step": 3300
},
{
"epoch": 1.62353515625,
"grad_norm": 4.204127311706543,
"learning_rate": 1e-05,
"loss": 0.4874,
"step": 3325
},
{
"epoch": 1.6357421875,
"grad_norm": 3.575812339782715,
"learning_rate": 1e-05,
"loss": 0.4741,
"step": 3350
},
{
"epoch": 1.64794921875,
"grad_norm": 3.3744544982910156,
"learning_rate": 1e-05,
"loss": 0.4931,
"step": 3375
},
{
"epoch": 1.66015625,
"grad_norm": 3.7953217029571533,
"learning_rate": 1e-05,
"loss": 0.434,
"step": 3400
},
{
"epoch": 1.67236328125,
"grad_norm": 3.54185152053833,
"learning_rate": 1e-05,
"loss": 0.449,
"step": 3425
},
{
"epoch": 1.6845703125,
"grad_norm": 3.5956945419311523,
"learning_rate": 1e-05,
"loss": 0.4836,
"step": 3450
},
{
"epoch": 1.69677734375,
"grad_norm": 3.323965072631836,
"learning_rate": 1e-05,
"loss": 0.5173,
"step": 3475
},
{
"epoch": 1.708984375,
"grad_norm": 3.515545606613159,
"learning_rate": 1e-05,
"loss": 0.4816,
"step": 3500
},
{
"epoch": 1.708984375,
"eval_cer": 22.472727272727273,
"eval_loss": 0.44189023971557617,
"eval_normalized_cer": 18.12577065351418,
"eval_runtime": 125.2883,
"eval_samples_per_second": 1.022,
"eval_steps_per_second": 0.064,
"step": 3500
},
{
"epoch": 1.72119140625,
"grad_norm": 2.902378797531128,
"learning_rate": 1e-05,
"loss": 0.4416,
"step": 3525
},
{
"epoch": 1.7333984375,
"grad_norm": 5.24100399017334,
"learning_rate": 1e-05,
"loss": 0.4477,
"step": 3550
},
{
"epoch": 1.74560546875,
"grad_norm": 2.840927839279175,
"learning_rate": 1e-05,
"loss": 0.4683,
"step": 3575
},
{
"epoch": 1.7578125,
"grad_norm": 3.2699358463287354,
"learning_rate": 1e-05,
"loss": 0.4806,
"step": 3600
},
{
"epoch": 1.77001953125,
"grad_norm": 3.1949098110198975,
"learning_rate": 1e-05,
"loss": 0.4652,
"step": 3625
},
{
"epoch": 1.7822265625,
"grad_norm": 2.1296651363372803,
"learning_rate": 1e-05,
"loss": 0.473,
"step": 3650
},
{
"epoch": 1.79443359375,
"grad_norm": 2.817379951477051,
"learning_rate": 1e-05,
"loss": 0.5153,
"step": 3675
},
{
"epoch": 1.806640625,
"grad_norm": 3.289232015609741,
"learning_rate": 1e-05,
"loss": 0.4662,
"step": 3700
},
{
"epoch": 1.81884765625,
"grad_norm": 5.639113426208496,
"learning_rate": 1e-05,
"loss": 0.4773,
"step": 3725
},
{
"epoch": 1.8310546875,
"grad_norm": 3.2733285427093506,
"learning_rate": 1e-05,
"loss": 0.4468,
"step": 3750
},
{
"epoch": 1.84326171875,
"grad_norm": 3.131972312927246,
"learning_rate": 1e-05,
"loss": 0.5031,
"step": 3775
},
{
"epoch": 1.85546875,
"grad_norm": 3.908076524734497,
"learning_rate": 1e-05,
"loss": 0.5018,
"step": 3800
},
{
"epoch": 1.86767578125,
"grad_norm": 3.57212233543396,
"learning_rate": 1e-05,
"loss": 0.4862,
"step": 3825
},
{
"epoch": 1.8798828125,
"grad_norm": 3.8183035850524902,
"learning_rate": 1e-05,
"loss": 0.4953,
"step": 3850
},
{
"epoch": 1.89208984375,
"grad_norm": 3.292840003967285,
"learning_rate": 1e-05,
"loss": 0.4726,
"step": 3875
},
{
"epoch": 1.904296875,
"grad_norm": 2.816706418991089,
"learning_rate": 1e-05,
"loss": 0.4925,
"step": 3900
},
{
"epoch": 1.91650390625,
"grad_norm": 4.149738311767578,
"learning_rate": 1e-05,
"loss": 0.4868,
"step": 3925
},
{
"epoch": 1.9287109375,
"grad_norm": 3.7483160495758057,
"learning_rate": 1e-05,
"loss": 0.4371,
"step": 3950
},
{
"epoch": 1.94091796875,
"grad_norm": 3.3911263942718506,
"learning_rate": 1e-05,
"loss": 0.4279,
"step": 3975
},
{
"epoch": 1.953125,
"grad_norm": 4.811086654663086,
"learning_rate": 1e-05,
"loss": 0.4482,
"step": 4000
},
{
"epoch": 1.953125,
"eval_cer": 23.054545454545455,
"eval_loss": 0.43437713384628296,
"eval_normalized_cer": 18.66009042334566,
"eval_runtime": 152.761,
"eval_samples_per_second": 0.838,
"eval_steps_per_second": 0.052,
"step": 4000
},
{
"epoch": 1.96533203125,
"grad_norm": 2.9040095806121826,
"learning_rate": 1e-05,
"loss": 0.448,
"step": 4025
},
{
"epoch": 1.9775390625,
"grad_norm": 3.5257699489593506,
"learning_rate": 1e-05,
"loss": 0.4533,
"step": 4050
},
{
"epoch": 1.98974609375,
"grad_norm": 3.5330731868743896,
"learning_rate": 1e-05,
"loss": 0.4978,
"step": 4075
},
{
"epoch": 2.001953125,
"grad_norm": 3.301609992980957,
"learning_rate": 1e-05,
"loss": 0.4408,
"step": 4100
},
{
"epoch": 2.01416015625,
"grad_norm": 2.8477797508239746,
"learning_rate": 1e-05,
"loss": 0.4858,
"step": 4125
},
{
"epoch": 2.0263671875,
"grad_norm": 3.097527503967285,
"learning_rate": 1e-05,
"loss": 0.455,
"step": 4150
},
{
"epoch": 2.03857421875,
"grad_norm": 3.6209845542907715,
"learning_rate": 1e-05,
"loss": 0.4461,
"step": 4175
},
{
"epoch": 2.05078125,
"grad_norm": 3.182161331176758,
"learning_rate": 1e-05,
"loss": 0.4472,
"step": 4200
},
{
"epoch": 2.06298828125,
"grad_norm": 4.14016056060791,
"learning_rate": 1e-05,
"loss": 0.4556,
"step": 4225
},
{
"epoch": 2.0751953125,
"grad_norm": 3.5136237144470215,
"learning_rate": 1e-05,
"loss": 0.4817,
"step": 4250
},
{
"epoch": 2.08740234375,
"grad_norm": 4.494429111480713,
"learning_rate": 1e-05,
"loss": 0.4399,
"step": 4275
},
{
"epoch": 2.099609375,
"grad_norm": 4.786192417144775,
"learning_rate": 1e-05,
"loss": 0.4755,
"step": 4300
},
{
"epoch": 2.11181640625,
"grad_norm": 4.873692512512207,
"learning_rate": 1e-05,
"loss": 0.4622,
"step": 4325
},
{
"epoch": 2.1240234375,
"grad_norm": 3.9967992305755615,
"learning_rate": 1e-05,
"loss": 0.4796,
"step": 4350
},
{
"epoch": 2.13623046875,
"grad_norm": 4.429341793060303,
"learning_rate": 1e-05,
"loss": 0.4564,
"step": 4375
},
{
"epoch": 2.1484375,
"grad_norm": 3.952096939086914,
"learning_rate": 1e-05,
"loss": 0.4753,
"step": 4400
},
{
"epoch": 2.16064453125,
"grad_norm": 4.080462455749512,
"learning_rate": 1e-05,
"loss": 0.4625,
"step": 4425
},
{
"epoch": 2.1728515625,
"grad_norm": 3.9949817657470703,
"learning_rate": 1e-05,
"loss": 0.4935,
"step": 4450
},
{
"epoch": 2.18505859375,
"grad_norm": 3.152660846710205,
"learning_rate": 1e-05,
"loss": 0.5341,
"step": 4475
},
{
"epoch": 2.197265625,
"grad_norm": 3.485163927078247,
"learning_rate": 1e-05,
"loss": 0.4524,
"step": 4500
},
{
"epoch": 2.197265625,
"eval_cer": 22.98181818181818,
"eval_loss": 0.4256882667541504,
"eval_normalized_cer": 18.29017673653925,
"eval_runtime": 133.4464,
"eval_samples_per_second": 0.959,
"eval_steps_per_second": 0.06,
"step": 4500
},
{
"epoch": 2.20947265625,
"grad_norm": 3.786696195602417,
"learning_rate": 1e-05,
"loss": 0.4827,
"step": 4525
},
{
"epoch": 2.2216796875,
"grad_norm": 3.4594826698303223,
"learning_rate": 1e-05,
"loss": 0.505,
"step": 4550
},
{
"epoch": 2.23388671875,
"grad_norm": 3.2194244861602783,
"learning_rate": 1e-05,
"loss": 0.465,
"step": 4575
},
{
"epoch": 2.24609375,
"grad_norm": 3.4851391315460205,
"learning_rate": 1e-05,
"loss": 0.4484,
"step": 4600
},
{
"epoch": 2.25830078125,
"grad_norm": 4.488097667694092,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 4625
},
{
"epoch": 2.2705078125,
"grad_norm": 3.071812868118286,
"learning_rate": 1e-05,
"loss": 0.4411,
"step": 4650
},
{
"epoch": 2.28271484375,
"grad_norm": 4.025106430053711,
"learning_rate": 1e-05,
"loss": 0.4436,
"step": 4675
},
{
"epoch": 2.294921875,
"grad_norm": 3.1780993938446045,
"learning_rate": 1e-05,
"loss": 0.4645,
"step": 4700
},
{
"epoch": 2.30712890625,
"grad_norm": 4.2200446128845215,
"learning_rate": 1e-05,
"loss": 0.5211,
"step": 4725
},
{
"epoch": 2.3193359375,
"grad_norm": 3.622480869293213,
"learning_rate": 1e-05,
"loss": 0.4763,
"step": 4750
},
{
"epoch": 2.33154296875,
"grad_norm": 3.0763843059539795,
"learning_rate": 1e-05,
"loss": 0.4876,
"step": 4775
},
{
"epoch": 2.34375,
"grad_norm": 3.9623701572418213,
"learning_rate": 1e-05,
"loss": 0.4683,
"step": 4800
},
{
"epoch": 2.35595703125,
"grad_norm": 4.627608776092529,
"learning_rate": 1e-05,
"loss": 0.426,
"step": 4825
},
{
"epoch": 2.3681640625,
"grad_norm": 4.717302322387695,
"learning_rate": 1e-05,
"loss": 0.4368,
"step": 4850
},
{
"epoch": 2.38037109375,
"grad_norm": 4.21370792388916,
"learning_rate": 1e-05,
"loss": 0.5348,
"step": 4875
},
{
"epoch": 2.392578125,
"grad_norm": 4.240349292755127,
"learning_rate": 1e-05,
"loss": 0.4766,
"step": 4900
},
{
"epoch": 2.40478515625,
"grad_norm": 4.513136386871338,
"learning_rate": 1e-05,
"loss": 0.421,
"step": 4925
},
{
"epoch": 2.4169921875,
"grad_norm": 3.5849685668945312,
"learning_rate": 1e-05,
"loss": 0.4599,
"step": 4950
},
{
"epoch": 2.42919921875,
"grad_norm": 3.148627519607544,
"learning_rate": 1e-05,
"loss": 0.4812,
"step": 4975
},
{
"epoch": 2.44140625,
"grad_norm": 3.3476321697235107,
"learning_rate": 1e-05,
"loss": 0.5048,
"step": 5000
},
{
"epoch": 2.44140625,
"eval_cer": 23.78181818181818,
"eval_loss": 0.42172056436538696,
"eval_normalized_cer": 18.7422934648582,
"eval_runtime": 133.6191,
"eval_samples_per_second": 0.958,
"eval_steps_per_second": 0.06,
"step": 5000
},
{
"epoch": 2.45361328125,
"grad_norm": 3.0917651653289795,
"learning_rate": 1e-05,
"loss": 0.4876,
"step": 5025
},
{
"epoch": 2.4658203125,
"grad_norm": 4.9287309646606445,
"learning_rate": 1e-05,
"loss": 0.4631,
"step": 5050
},
{
"epoch": 2.47802734375,
"grad_norm": 3.54841947555542,
"learning_rate": 1e-05,
"loss": 0.4752,
"step": 5075
},
{
"epoch": 2.490234375,
"grad_norm": 3.062551975250244,
"learning_rate": 1e-05,
"loss": 0.4232,
"step": 5100
},
{
"epoch": 2.50244140625,
"grad_norm": 5.846357822418213,
"learning_rate": 1e-05,
"loss": 0.4819,
"step": 5125
},
{
"epoch": 2.5146484375,
"grad_norm": 5.39830207824707,
"learning_rate": 1e-05,
"loss": 0.4574,
"step": 5150
},
{
"epoch": 2.52685546875,
"grad_norm": 3.695359468460083,
"learning_rate": 1e-05,
"loss": 0.5137,
"step": 5175
},
{
"epoch": 2.5390625,
"grad_norm": 3.8433547019958496,
"learning_rate": 1e-05,
"loss": 0.4097,
"step": 5200
},
{
"epoch": 2.55126953125,
"grad_norm": 3.887317419052124,
"learning_rate": 1e-05,
"loss": 0.481,
"step": 5225
},
{
"epoch": 2.5634765625,
"grad_norm": 3.9261627197265625,
"learning_rate": 1e-05,
"loss": 0.4513,
"step": 5250
},
{
"epoch": 2.57568359375,
"grad_norm": 2.480574369430542,
"learning_rate": 1e-05,
"loss": 0.4372,
"step": 5275
},
{
"epoch": 2.587890625,
"grad_norm": 4.399667263031006,
"learning_rate": 1e-05,
"loss": 0.4252,
"step": 5300
},
{
"epoch": 2.60009765625,
"grad_norm": 3.6141607761383057,
"learning_rate": 1e-05,
"loss": 0.4369,
"step": 5325
},
{
"epoch": 2.6123046875,
"grad_norm": 2.9269521236419678,
"learning_rate": 1e-05,
"loss": 0.4987,
"step": 5350
},
{
"epoch": 2.62451171875,
"grad_norm": 3.667206048965454,
"learning_rate": 1e-05,
"loss": 0.495,
"step": 5375
},
{
"epoch": 2.63671875,
"grad_norm": 5.493015289306641,
"learning_rate": 1e-05,
"loss": 0.4439,
"step": 5400
},
{
"epoch": 2.64892578125,
"grad_norm": 3.328899383544922,
"learning_rate": 1e-05,
"loss": 0.4539,
"step": 5425
},
{
"epoch": 2.6611328125,
"grad_norm": 2.529545783996582,
"learning_rate": 1e-05,
"loss": 0.4698,
"step": 5450
},
{
"epoch": 2.67333984375,
"grad_norm": 3.669677495956421,
"learning_rate": 1e-05,
"loss": 0.3939,
"step": 5475
},
{
"epoch": 2.685546875,
"grad_norm": 3.070936441421509,
"learning_rate": 1e-05,
"loss": 0.4721,
"step": 5500
},
{
"epoch": 2.685546875,
"eval_cer": 23.01818181818182,
"eval_loss": 0.4226231276988983,
"eval_normalized_cer": 18.33127825729552,
"eval_runtime": 126.3723,
"eval_samples_per_second": 1.013,
"eval_steps_per_second": 0.063,
"step": 5500
},
{
"epoch": 2.69775390625,
"grad_norm": 4.265174865722656,
"learning_rate": 1e-05,
"loss": 0.5078,
"step": 5525
},
{
"epoch": 2.7099609375,
"grad_norm": 5.314130783081055,
"learning_rate": 1e-05,
"loss": 0.452,
"step": 5550
},
{
"epoch": 2.72216796875,
"grad_norm": 4.048317909240723,
"learning_rate": 1e-05,
"loss": 0.4419,
"step": 5575
},
{
"epoch": 2.734375,
"grad_norm": 3.8373677730560303,
"learning_rate": 1e-05,
"loss": 0.4236,
"step": 5600
},
{
"epoch": 2.74658203125,
"grad_norm": 3.880476713180542,
"learning_rate": 1e-05,
"loss": 0.4224,
"step": 5625
},
{
"epoch": 2.7587890625,
"grad_norm": 4.03834867477417,
"learning_rate": 1e-05,
"loss": 0.4762,
"step": 5650
},
{
"epoch": 2.77099609375,
"grad_norm": 3.321204900741577,
"learning_rate": 1e-05,
"loss": 0.4823,
"step": 5675
},
{
"epoch": 2.783203125,
"grad_norm": 3.013662099838257,
"learning_rate": 1e-05,
"loss": 0.4839,
"step": 5700
},
{
"epoch": 2.79541015625,
"grad_norm": 4.0579705238342285,
"learning_rate": 1e-05,
"loss": 0.4754,
"step": 5725
},
{
"epoch": 2.8076171875,
"grad_norm": 3.929385185241699,
"learning_rate": 1e-05,
"loss": 0.4123,
"step": 5750
},
{
"epoch": 2.81982421875,
"grad_norm": 4.071752071380615,
"learning_rate": 1e-05,
"loss": 0.4989,
"step": 5775
},
{
"epoch": 2.83203125,
"grad_norm": 3.5751779079437256,
"learning_rate": 1e-05,
"loss": 0.4507,
"step": 5800
},
{
"epoch": 2.84423828125,
"grad_norm": 3.4296460151672363,
"learning_rate": 1e-05,
"loss": 0.522,
"step": 5825
},
{
"epoch": 2.8564453125,
"grad_norm": 2.707711696624756,
"learning_rate": 1e-05,
"loss": 0.4347,
"step": 5850
},
{
"epoch": 2.86865234375,
"grad_norm": 2.0897769927978516,
"learning_rate": 1e-05,
"loss": 0.4971,
"step": 5875
},
{
"epoch": 2.880859375,
"grad_norm": 2.9398937225341797,
"learning_rate": 1e-05,
"loss": 0.4507,
"step": 5900
},
{
"epoch": 2.89306640625,
"grad_norm": 3.4962351322174072,
"learning_rate": 1e-05,
"loss": 0.4662,
"step": 5925
},
{
"epoch": 2.9052734375,
"grad_norm": 3.520770311355591,
"learning_rate": 1e-05,
"loss": 0.4784,
"step": 5950
},
{
"epoch": 2.91748046875,
"grad_norm": 3.4182958602905273,
"learning_rate": 1e-05,
"loss": 0.4952,
"step": 5975
},
{
"epoch": 2.9296875,
"grad_norm": 2.9308042526245117,
"learning_rate": 1e-05,
"loss": 0.4321,
"step": 6000
},
{
"epoch": 2.9296875,
"eval_cer": 20.472727272727273,
"eval_loss": 0.4113434851169586,
"eval_normalized_cer": 15.577476366625564,
"eval_runtime": 105.0617,
"eval_samples_per_second": 1.218,
"eval_steps_per_second": 0.076,
"step": 6000
},
{
"epoch": 2.94189453125,
"grad_norm": 2.3675143718719482,
"learning_rate": 1e-05,
"loss": 0.5268,
"step": 6025
},
{
"epoch": 2.9541015625,
"grad_norm": 3.0600383281707764,
"learning_rate": 1e-05,
"loss": 0.4166,
"step": 6050
},
{
"epoch": 2.96630859375,
"grad_norm": 3.9981579780578613,
"learning_rate": 1e-05,
"loss": 0.4758,
"step": 6075
},
{
"epoch": 2.978515625,
"grad_norm": 4.047635555267334,
"learning_rate": 1e-05,
"loss": 0.4376,
"step": 6100
},
{
"epoch": 2.99072265625,
"grad_norm": 3.3930447101593018,
"learning_rate": 1e-05,
"loss": 0.4391,
"step": 6125
},
{
"epoch": 3.0029296875,
"grad_norm": 2.583280563354492,
"learning_rate": 1e-05,
"loss": 0.3899,
"step": 6150
},
{
"epoch": 3.01513671875,
"grad_norm": 5.198780059814453,
"learning_rate": 1e-05,
"loss": 0.4538,
"step": 6175
},
{
"epoch": 3.02734375,
"grad_norm": 5.145614147186279,
"learning_rate": 1e-05,
"loss": 0.5109,
"step": 6200
},
{
"epoch": 3.03955078125,
"grad_norm": 4.1711320877075195,
"learning_rate": 1e-05,
"loss": 0.4124,
"step": 6225
},
{
"epoch": 3.0517578125,
"grad_norm": 3.6686923503875732,
"learning_rate": 1e-05,
"loss": 0.4233,
"step": 6250
},
{
"epoch": 3.06396484375,
"grad_norm": 3.542102575302124,
"learning_rate": 1e-05,
"loss": 0.5235,
"step": 6275
},
{
"epoch": 3.076171875,
"grad_norm": 2.960315227508545,
"learning_rate": 1e-05,
"loss": 0.4599,
"step": 6300
},
{
"epoch": 3.08837890625,
"grad_norm": 3.370656728744507,
"learning_rate": 1e-05,
"loss": 0.4668,
"step": 6325
},
{
"epoch": 3.1005859375,
"grad_norm": 4.451176643371582,
"learning_rate": 1e-05,
"loss": 0.4846,
"step": 6350
},
{
"epoch": 3.11279296875,
"grad_norm": 3.628671646118164,
"learning_rate": 1e-05,
"loss": 0.4636,
"step": 6375
},
{
"epoch": 3.125,
"grad_norm": 3.664491653442383,
"learning_rate": 1e-05,
"loss": 0.4843,
"step": 6400
},
{
"epoch": 3.13720703125,
"grad_norm": 3.1877737045288086,
"learning_rate": 1e-05,
"loss": 0.5361,
"step": 6425
},
{
"epoch": 3.1494140625,
"grad_norm": 3.7835752964019775,
"learning_rate": 1e-05,
"loss": 0.4808,
"step": 6450
},
{
"epoch": 3.16162109375,
"grad_norm": 3.494187355041504,
"learning_rate": 1e-05,
"loss": 0.4401,
"step": 6475
},
{
"epoch": 3.173828125,
"grad_norm": 2.837970495223999,
"learning_rate": 1e-05,
"loss": 0.4454,
"step": 6500
},
{
"epoch": 3.173828125,
"eval_cer": 21.709090909090907,
"eval_loss": 0.40472567081451416,
"eval_normalized_cer": 16.152897657213316,
"eval_runtime": 133.6937,
"eval_samples_per_second": 0.957,
"eval_steps_per_second": 0.06,
"step": 6500
},
{
"epoch": 3.18603515625,
"grad_norm": 3.5152645111083984,
"learning_rate": 1e-05,
"loss": 0.4798,
"step": 6525
},
{
"epoch": 3.1982421875,
"grad_norm": 3.201646327972412,
"learning_rate": 1e-05,
"loss": 0.4576,
"step": 6550
},
{
"epoch": 3.21044921875,
"grad_norm": 3.704028606414795,
"learning_rate": 1e-05,
"loss": 0.4428,
"step": 6575
},
{
"epoch": 3.22265625,
"grad_norm": 4.099301815032959,
"learning_rate": 1e-05,
"loss": 0.4787,
"step": 6600
},
{
"epoch": 3.23486328125,
"grad_norm": 3.0543127059936523,
"learning_rate": 1e-05,
"loss": 0.4761,
"step": 6625
},
{
"epoch": 3.2470703125,
"grad_norm": 3.4532554149627686,
"learning_rate": 1e-05,
"loss": 0.4559,
"step": 6650
},
{
"epoch": 3.25927734375,
"grad_norm": 2.8139491081237793,
"learning_rate": 1e-05,
"loss": 0.4705,
"step": 6675
},
{
"epoch": 3.271484375,
"grad_norm": 3.291689157485962,
"learning_rate": 1e-05,
"loss": 0.4738,
"step": 6700
},
{
"epoch": 3.28369140625,
"grad_norm": 3.0359091758728027,
"learning_rate": 1e-05,
"loss": 0.3979,
"step": 6725
},
{
"epoch": 3.2958984375,
"grad_norm": 4.325143337249756,
"learning_rate": 1e-05,
"loss": 0.4309,
"step": 6750
},
{
"epoch": 3.30810546875,
"grad_norm": 3.864635705947876,
"learning_rate": 1e-05,
"loss": 0.4905,
"step": 6775
},
{
"epoch": 3.3203125,
"grad_norm": 3.659311532974243,
"learning_rate": 1e-05,
"loss": 0.5003,
"step": 6800
},
{
"epoch": 3.33251953125,
"grad_norm": 3.6954805850982666,
"learning_rate": 1e-05,
"loss": 0.4197,
"step": 6825
},
{
"epoch": 3.3447265625,
"grad_norm": 2.9357662200927734,
"learning_rate": 1e-05,
"loss": 0.425,
"step": 6850
},
{
"epoch": 3.35693359375,
"grad_norm": 2.9969289302825928,
"learning_rate": 1e-05,
"loss": 0.4883,
"step": 6875
},
{
"epoch": 3.369140625,
"grad_norm": 3.333348035812378,
"learning_rate": 1e-05,
"loss": 0.4687,
"step": 6900
},
{
"epoch": 3.38134765625,
"grad_norm": 4.444482803344727,
"learning_rate": 1e-05,
"loss": 0.4324,
"step": 6925
},
{
"epoch": 3.3935546875,
"grad_norm": 2.3350095748901367,
"learning_rate": 1e-05,
"loss": 0.4476,
"step": 6950
},
{
"epoch": 3.40576171875,
"grad_norm": 3.4752862453460693,
"learning_rate": 1e-05,
"loss": 0.4601,
"step": 6975
},
{
"epoch": 3.41796875,
"grad_norm": 3.1359448432922363,
"learning_rate": 1e-05,
"loss": 0.4894,
"step": 7000
},
{
"epoch": 3.41796875,
"eval_cer": 20.363636363636363,
"eval_loss": 0.41460537910461426,
"eval_normalized_cer": 15.988491574188245,
"eval_runtime": 131.3226,
"eval_samples_per_second": 0.975,
"eval_steps_per_second": 0.061,
"step": 7000
},
{
"epoch": 3.43017578125,
"grad_norm": 3.0270822048187256,
"learning_rate": 1e-05,
"loss": 0.4288,
"step": 7025
},
{
"epoch": 3.4423828125,
"grad_norm": 3.487501382827759,
"learning_rate": 1e-05,
"loss": 0.473,
"step": 7050
},
{
"epoch": 3.45458984375,
"grad_norm": 3.1691272258758545,
"learning_rate": 1e-05,
"loss": 0.4516,
"step": 7075
},
{
"epoch": 3.466796875,
"grad_norm": 3.07665753364563,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 7100
},
{
"epoch": 3.47900390625,
"grad_norm": 3.697643756866455,
"learning_rate": 1e-05,
"loss": 0.479,
"step": 7125
},
{
"epoch": 3.4912109375,
"grad_norm": 3.787980556488037,
"learning_rate": 1e-05,
"loss": 0.4837,
"step": 7150
},
{
"epoch": 3.50341796875,
"grad_norm": 3.792851686477661,
"learning_rate": 1e-05,
"loss": 0.4339,
"step": 7175
},
{
"epoch": 3.515625,
"grad_norm": 4.371828079223633,
"learning_rate": 1e-05,
"loss": 0.4364,
"step": 7200
},
{
"epoch": 3.52783203125,
"grad_norm": 2.8231394290924072,
"learning_rate": 1e-05,
"loss": 0.4776,
"step": 7225
},
{
"epoch": 3.5400390625,
"grad_norm": 3.1332738399505615,
"learning_rate": 1e-05,
"loss": 0.4158,
"step": 7250
},
{
"epoch": 3.55224609375,
"grad_norm": 3.6352245807647705,
"learning_rate": 1e-05,
"loss": 0.4797,
"step": 7275
},
{
"epoch": 3.564453125,
"grad_norm": 4.453933238983154,
"learning_rate": 1e-05,
"loss": 0.4355,
"step": 7300
},
{
"epoch": 3.57666015625,
"grad_norm": 2.9594037532806396,
"learning_rate": 1e-05,
"loss": 0.4696,
"step": 7325
},
{
"epoch": 3.5888671875,
"grad_norm": 5.014936447143555,
"learning_rate": 1e-05,
"loss": 0.4622,
"step": 7350
},
{
"epoch": 3.60107421875,
"grad_norm": 3.2860615253448486,
"learning_rate": 1e-05,
"loss": 0.4536,
"step": 7375
},
{
"epoch": 3.61328125,
"grad_norm": 5.162105083465576,
"learning_rate": 1e-05,
"loss": 0.4456,
"step": 7400
},
{
"epoch": 3.62548828125,
"grad_norm": 3.706555128097534,
"learning_rate": 1e-05,
"loss": 0.4843,
"step": 7425
},
{
"epoch": 3.6376953125,
"grad_norm": 2.490443706512451,
"learning_rate": 1e-05,
"loss": 0.4284,
"step": 7450
},
{
"epoch": 3.64990234375,
"grad_norm": 3.4704225063323975,
"learning_rate": 1e-05,
"loss": 0.4562,
"step": 7475
},
{
"epoch": 3.662109375,
"grad_norm": 3.8659839630126953,
"learning_rate": 1e-05,
"loss": 0.4603,
"step": 7500
},
{
"epoch": 3.662109375,
"eval_cer": 23.78181818181818,
"eval_loss": 0.4173641800880432,
"eval_normalized_cer": 19.35881627620222,
"eval_runtime": 134.5888,
"eval_samples_per_second": 0.951,
"eval_steps_per_second": 0.059,
"step": 7500
},
{
"epoch": 3.67431640625,
"grad_norm": 3.280311346054077,
"learning_rate": 1e-05,
"loss": 0.4501,
"step": 7525
},
{
"epoch": 3.6865234375,
"grad_norm": 4.28645133972168,
"learning_rate": 1e-05,
"loss": 0.4437,
"step": 7550
},
{
"epoch": 3.69873046875,
"grad_norm": 3.8637635707855225,
"learning_rate": 1e-05,
"loss": 0.4302,
"step": 7575
},
{
"epoch": 3.7109375,
"grad_norm": 3.35193133354187,
"learning_rate": 1e-05,
"loss": 0.4298,
"step": 7600
},
{
"epoch": 3.72314453125,
"grad_norm": 3.156805992126465,
"learning_rate": 1e-05,
"loss": 0.4294,
"step": 7625
},
{
"epoch": 3.7353515625,
"grad_norm": 3.2977190017700195,
"learning_rate": 1e-05,
"loss": 0.4471,
"step": 7650
},
{
"epoch": 3.74755859375,
"grad_norm": 3.3117992877960205,
"learning_rate": 1e-05,
"loss": 0.4024,
"step": 7675
},
{
"epoch": 3.759765625,
"grad_norm": 4.135869979858398,
"learning_rate": 1e-05,
"loss": 0.431,
"step": 7700
},
{
"epoch": 3.77197265625,
"grad_norm": 3.1641712188720703,
"learning_rate": 1e-05,
"loss": 0.4494,
"step": 7725
},
{
"epoch": 3.7841796875,
"grad_norm": 3.1952223777770996,
"learning_rate": 1e-05,
"loss": 0.4415,
"step": 7750
},
{
"epoch": 3.79638671875,
"grad_norm": 3.2214698791503906,
"learning_rate": 1e-05,
"loss": 0.4409,
"step": 7775
},
{
"epoch": 3.80859375,
"grad_norm": 3.14152193069458,
"learning_rate": 1e-05,
"loss": 0.4407,
"step": 7800
},
{
"epoch": 3.82080078125,
"grad_norm": 3.8332841396331787,
"learning_rate": 1e-05,
"loss": 0.4438,
"step": 7825
},
{
"epoch": 3.8330078125,
"grad_norm": 3.2300031185150146,
"learning_rate": 1e-05,
"loss": 0.4377,
"step": 7850
},
{
"epoch": 3.84521484375,
"grad_norm": 3.9044997692108154,
"learning_rate": 1e-05,
"loss": 0.4376,
"step": 7875
},
{
"epoch": 3.857421875,
"grad_norm": 2.9638853073120117,
"learning_rate": 1e-05,
"loss": 0.4605,
"step": 7900
},
{
"epoch": 3.86962890625,
"grad_norm": 3.6924691200256348,
"learning_rate": 1e-05,
"loss": 0.4632,
"step": 7925
},
{
"epoch": 3.8818359375,
"grad_norm": 2.571397542953491,
"learning_rate": 1e-05,
"loss": 0.439,
"step": 7950
},
{
"epoch": 3.89404296875,
"grad_norm": 2.900402069091797,
"learning_rate": 1e-05,
"loss": 0.4431,
"step": 7975
},
{
"epoch": 3.90625,
"grad_norm": 3.216660737991333,
"learning_rate": 1e-05,
"loss": 0.4479,
"step": 8000
},
{
"epoch": 3.90625,
"eval_cer": 21.527272727272727,
"eval_loss": 0.40407997369766235,
"eval_normalized_cer": 16.974928072338678,
"eval_runtime": 135.4416,
"eval_samples_per_second": 0.945,
"eval_steps_per_second": 0.059,
"step": 8000
},
{
"epoch": 3.91845703125,
"grad_norm": 3.2848970890045166,
"learning_rate": 1e-05,
"loss": 0.4152,
"step": 8025
},
{
"epoch": 3.9306640625,
"grad_norm": 2.901078939437866,
"learning_rate": 1e-05,
"loss": 0.4292,
"step": 8050
},
{
"epoch": 3.94287109375,
"grad_norm": 3.3636679649353027,
"learning_rate": 1e-05,
"loss": 0.4624,
"step": 8075
},
{
"epoch": 3.955078125,
"grad_norm": 4.242199420928955,
"learning_rate": 1e-05,
"loss": 0.4427,
"step": 8100
},
{
"epoch": 3.96728515625,
"grad_norm": 3.422555446624756,
"learning_rate": 1e-05,
"loss": 0.4161,
"step": 8125
},
{
"epoch": 3.9794921875,
"grad_norm": 2.359015703201294,
"learning_rate": 1e-05,
"loss": 0.422,
"step": 8150
},
{
"epoch": 3.99169921875,
"grad_norm": 3.1188254356384277,
"learning_rate": 1e-05,
"loss": 0.4346,
"step": 8175
},
{
"epoch": 4.00390625,
"grad_norm": 3.1094157695770264,
"learning_rate": 1e-05,
"loss": 0.4511,
"step": 8200
},
{
"epoch": 4.01611328125,
"grad_norm": 3.269327163696289,
"learning_rate": 1e-05,
"loss": 0.4548,
"step": 8225
},
{
"epoch": 4.0283203125,
"grad_norm": 2.589052677154541,
"learning_rate": 1e-05,
"loss": 0.4267,
"step": 8250
},
{
"epoch": 4.04052734375,
"grad_norm": 3.032090187072754,
"learning_rate": 1e-05,
"loss": 0.4194,
"step": 8275
},
{
"epoch": 4.052734375,
"grad_norm": 3.1409432888031006,
"learning_rate": 1e-05,
"loss": 0.4423,
"step": 8300
},
{
"epoch": 4.06494140625,
"grad_norm": 4.070183753967285,
"learning_rate": 1e-05,
"loss": 0.4171,
"step": 8325
},
{
"epoch": 4.0771484375,
"grad_norm": 4.6292619705200195,
"learning_rate": 1e-05,
"loss": 0.4689,
"step": 8350
},
{
"epoch": 4.08935546875,
"grad_norm": 3.5778212547302246,
"learning_rate": 1e-05,
"loss": 0.4341,
"step": 8375
},
{
"epoch": 4.1015625,
"grad_norm": 2.5463714599609375,
"learning_rate": 1e-05,
"loss": 0.4193,
"step": 8400
},
{
"epoch": 4.11376953125,
"grad_norm": 2.498852491378784,
"learning_rate": 1e-05,
"loss": 0.4656,
"step": 8425
},
{
"epoch": 4.1259765625,
"grad_norm": 2.5242180824279785,
"learning_rate": 1e-05,
"loss": 0.4491,
"step": 8450
},
{
"epoch": 4.13818359375,
"grad_norm": 4.114852428436279,
"learning_rate": 1e-05,
"loss": 0.4202,
"step": 8475
},
{
"epoch": 4.150390625,
"grad_norm": 2.866490125656128,
"learning_rate": 1e-05,
"loss": 0.4523,
"step": 8500
},
{
"epoch": 4.150390625,
"eval_cer": 19.70909090909091,
"eval_loss": 0.40719008445739746,
"eval_normalized_cer": 15.371968762844226,
"eval_runtime": 136.9983,
"eval_samples_per_second": 0.934,
"eval_steps_per_second": 0.058,
"step": 8500
},
{
"epoch": 4.16259765625,
"grad_norm": 4.2479448318481445,
"learning_rate": 1e-05,
"loss": 0.4742,
"step": 8525
},
{
"epoch": 4.1748046875,
"grad_norm": 2.736863851547241,
"learning_rate": 1e-05,
"loss": 0.4366,
"step": 8550
},
{
"epoch": 4.18701171875,
"grad_norm": 3.3167812824249268,
"learning_rate": 1e-05,
"loss": 0.4509,
"step": 8575
},
{
"epoch": 4.19921875,
"grad_norm": 2.8058440685272217,
"learning_rate": 1e-05,
"loss": 0.4179,
"step": 8600
},
{
"epoch": 4.21142578125,
"grad_norm": 3.10642409324646,
"learning_rate": 1e-05,
"loss": 0.4799,
"step": 8625
},
{
"epoch": 4.2236328125,
"grad_norm": 3.9041507244110107,
"learning_rate": 1e-05,
"loss": 0.4788,
"step": 8650
},
{
"epoch": 4.23583984375,
"grad_norm": 4.171154022216797,
"learning_rate": 1e-05,
"loss": 0.4534,
"step": 8675
},
{
"epoch": 4.248046875,
"grad_norm": 2.6511294841766357,
"learning_rate": 1e-05,
"loss": 0.4395,
"step": 8700
},
{
"epoch": 4.26025390625,
"grad_norm": 3.9899098873138428,
"learning_rate": 1e-05,
"loss": 0.4421,
"step": 8725
},
{
"epoch": 4.2724609375,
"grad_norm": 2.973851442337036,
"learning_rate": 1e-05,
"loss": 0.4166,
"step": 8750
},
{
"epoch": 4.28466796875,
"grad_norm": 3.789973497390747,
"learning_rate": 1e-05,
"loss": 0.4312,
"step": 8775
},
{
"epoch": 4.296875,
"grad_norm": 4.157674789428711,
"learning_rate": 1e-05,
"loss": 0.4571,
"step": 8800
},
{
"epoch": 4.30908203125,
"grad_norm": 4.191178321838379,
"learning_rate": 1e-05,
"loss": 0.4222,
"step": 8825
},
{
"epoch": 4.3212890625,
"grad_norm": 2.899761915206909,
"learning_rate": 1e-05,
"loss": 0.4174,
"step": 8850
},
{
"epoch": 4.33349609375,
"grad_norm": 2.9615023136138916,
"learning_rate": 1e-05,
"loss": 0.4684,
"step": 8875
},
{
"epoch": 4.345703125,
"grad_norm": 2.9529151916503906,
"learning_rate": 1e-05,
"loss": 0.4079,
"step": 8900
},
{
"epoch": 4.35791015625,
"grad_norm": 3.4950997829437256,
"learning_rate": 1e-05,
"loss": 0.4239,
"step": 8925
},
{
"epoch": 4.3701171875,
"grad_norm": 4.151655673980713,
"learning_rate": 1e-05,
"loss": 0.4737,
"step": 8950
},
{
"epoch": 4.38232421875,
"grad_norm": 3.2207164764404297,
"learning_rate": 1e-05,
"loss": 0.468,
"step": 8975
},
{
"epoch": 4.39453125,
"grad_norm": 4.433598041534424,
"learning_rate": 1e-05,
"loss": 0.4742,
"step": 9000
},
{
"epoch": 4.39453125,
"eval_cer": 20.836363636363636,
"eval_loss": 0.40726813673973083,
"eval_normalized_cer": 16.563912864775997,
"eval_runtime": 148.3773,
"eval_samples_per_second": 0.863,
"eval_steps_per_second": 0.054,
"step": 9000
},
{
"epoch": 4.40673828125,
"grad_norm": 2.8774595260620117,
"learning_rate": 1e-05,
"loss": 0.4355,
"step": 9025
},
{
"epoch": 4.4189453125,
"grad_norm": 3.043325185775757,
"learning_rate": 1e-05,
"loss": 0.4267,
"step": 9050
},
{
"epoch": 4.43115234375,
"grad_norm": 3.7946414947509766,
"learning_rate": 1e-05,
"loss": 0.4424,
"step": 9075
},
{
"epoch": 4.443359375,
"grad_norm": 2.7561936378479004,
"learning_rate": 1e-05,
"loss": 0.4054,
"step": 9100
},
{
"epoch": 4.45556640625,
"grad_norm": 3.3554115295410156,
"learning_rate": 1e-05,
"loss": 0.4442,
"step": 9125
},
{
"epoch": 4.4677734375,
"grad_norm": 3.1039364337921143,
"learning_rate": 1e-05,
"loss": 0.4289,
"step": 9150
},
{
"epoch": 4.47998046875,
"grad_norm": 2.8910741806030273,
"learning_rate": 1e-05,
"loss": 0.4103,
"step": 9175
},
{
"epoch": 4.4921875,
"grad_norm": 3.005373001098633,
"learning_rate": 1e-05,
"loss": 0.3944,
"step": 9200
},
{
"epoch": 4.50439453125,
"grad_norm": 2.7834503650665283,
"learning_rate": 1e-05,
"loss": 0.4235,
"step": 9225
},
{
"epoch": 4.5166015625,
"grad_norm": 3.2540178298950195,
"learning_rate": 1e-05,
"loss": 0.4617,
"step": 9250
},
{
"epoch": 4.52880859375,
"grad_norm": 3.1600005626678467,
"learning_rate": 1e-05,
"loss": 0.4445,
"step": 9275
},
{
"epoch": 4.541015625,
"grad_norm": 3.318638801574707,
"learning_rate": 1e-05,
"loss": 0.4164,
"step": 9300
},
{
"epoch": 4.55322265625,
"grad_norm": 4.483547210693359,
"learning_rate": 1e-05,
"loss": 0.468,
"step": 9325
},
{
"epoch": 4.5654296875,
"grad_norm": 3.946882963180542,
"learning_rate": 1e-05,
"loss": 0.4349,
"step": 9350
},
{
"epoch": 4.57763671875,
"grad_norm": 2.6997079849243164,
"learning_rate": 1e-05,
"loss": 0.4483,
"step": 9375
},
{
"epoch": 4.58984375,
"grad_norm": 5.617648601531982,
"learning_rate": 1e-05,
"loss": 0.477,
"step": 9400
},
{
"epoch": 4.60205078125,
"grad_norm": 3.01735520362854,
"learning_rate": 1e-05,
"loss": 0.4198,
"step": 9425
},
{
"epoch": 4.6142578125,
"grad_norm": 3.301004648208618,
"learning_rate": 1e-05,
"loss": 0.466,
"step": 9450
},
{
"epoch": 4.62646484375,
"grad_norm": 3.2421813011169434,
"learning_rate": 1e-05,
"loss": 0.4366,
"step": 9475
},
{
"epoch": 4.638671875,
"grad_norm": 4.813492774963379,
"learning_rate": 1e-05,
"loss": 0.4737,
"step": 9500
},
{
"epoch": 4.638671875,
"eval_cer": 21.78181818181818,
"eval_loss": 0.40351322293281555,
"eval_normalized_cer": 16.892725030826142,
"eval_runtime": 136.3812,
"eval_samples_per_second": 0.939,
"eval_steps_per_second": 0.059,
"step": 9500
},
{
"epoch": 4.65087890625,
"grad_norm": 3.3861124515533447,
"learning_rate": 1e-05,
"loss": 0.447,
"step": 9525
},
{
"epoch": 4.6630859375,
"grad_norm": 3.244462013244629,
"learning_rate": 1e-05,
"loss": 0.4422,
"step": 9550
},
{
"epoch": 4.67529296875,
"grad_norm": 3.824782133102417,
"learning_rate": 1e-05,
"loss": 0.4431,
"step": 9575
},
{
"epoch": 4.6875,
"grad_norm": 3.746281862258911,
"learning_rate": 1e-05,
"loss": 0.48,
"step": 9600
},
{
"epoch": 4.69970703125,
"grad_norm": 4.126583099365234,
"learning_rate": 1e-05,
"loss": 0.4643,
"step": 9625
},
{
"epoch": 4.7119140625,
"grad_norm": 3.3953585624694824,
"learning_rate": 1e-05,
"loss": 0.4826,
"step": 9650
},
{
"epoch": 4.72412109375,
"grad_norm": 3.077698230743408,
"learning_rate": 1e-05,
"loss": 0.4138,
"step": 9675
},
{
"epoch": 4.736328125,
"grad_norm": 4.628427982330322,
"learning_rate": 1e-05,
"loss": 0.4102,
"step": 9700
},
{
"epoch": 4.74853515625,
"grad_norm": 3.1975038051605225,
"learning_rate": 1e-05,
"loss": 0.4374,
"step": 9725
},
{
"epoch": 4.7607421875,
"grad_norm": 3.606600522994995,
"learning_rate": 1e-05,
"loss": 0.4095,
"step": 9750
},
{
"epoch": 4.77294921875,
"grad_norm": 3.183274507522583,
"learning_rate": 1e-05,
"loss": 0.4759,
"step": 9775
},
{
"epoch": 4.78515625,
"grad_norm": 3.52425479888916,
"learning_rate": 1e-05,
"loss": 0.4429,
"step": 9800
},
{
"epoch": 4.79736328125,
"grad_norm": 3.2504448890686035,
"learning_rate": 1e-05,
"loss": 0.4271,
"step": 9825
},
{
"epoch": 4.8095703125,
"grad_norm": 3.7762293815612793,
"learning_rate": 1e-05,
"loss": 0.4164,
"step": 9850
},
{
"epoch": 4.82177734375,
"grad_norm": 2.5560717582702637,
"learning_rate": 1e-05,
"loss": 0.4622,
"step": 9875
},
{
"epoch": 4.833984375,
"grad_norm": 2.2996389865875244,
"learning_rate": 1e-05,
"loss": 0.4728,
"step": 9900
},
{
"epoch": 4.84619140625,
"grad_norm": 4.10749626159668,
"learning_rate": 1e-05,
"loss": 0.4954,
"step": 9925
},
{
"epoch": 4.8583984375,
"grad_norm": 3.2446017265319824,
"learning_rate": 1e-05,
"loss": 0.485,
"step": 9950
},
{
"epoch": 4.87060546875,
"grad_norm": 2.7468326091766357,
"learning_rate": 1e-05,
"loss": 0.4026,
"step": 9975
},
{
"epoch": 4.8828125,
"grad_norm": 4.006360054016113,
"learning_rate": 1e-05,
"loss": 0.4785,
"step": 10000
},
{
"epoch": 4.8828125,
"eval_cer": 21.490909090909092,
"eval_loss": 0.39705541729927063,
"eval_normalized_cer": 16.48170982326346,
"eval_runtime": 135.3957,
"eval_samples_per_second": 0.945,
"eval_steps_per_second": 0.059,
"step": 10000
},
{
"epoch": 4.89501953125,
"grad_norm": 1.9145240783691406,
"learning_rate": 1e-05,
"loss": 0.4676,
"step": 10025
},
{
"epoch": 4.9072265625,
"grad_norm": 3.474172830581665,
"learning_rate": 1e-05,
"loss": 0.4759,
"step": 10050
},
{
"epoch": 4.91943359375,
"grad_norm": 4.628629684448242,
"learning_rate": 1e-05,
"loss": 0.4614,
"step": 10075
},
{
"epoch": 4.931640625,
"grad_norm": 3.4078354835510254,
"learning_rate": 1e-05,
"loss": 0.4194,
"step": 10100
},
{
"epoch": 4.94384765625,
"grad_norm": 2.336055278778076,
"learning_rate": 1e-05,
"loss": 0.4508,
"step": 10125
},
{
"epoch": 4.9560546875,
"grad_norm": 3.3985817432403564,
"learning_rate": 1e-05,
"loss": 0.5009,
"step": 10150
},
{
"epoch": 4.96826171875,
"grad_norm": 3.0444228649139404,
"learning_rate": 1e-05,
"loss": 0.4485,
"step": 10175
},
{
"epoch": 4.98046875,
"grad_norm": 3.737316131591797,
"learning_rate": 1e-05,
"loss": 0.4723,
"step": 10200
},
{
"epoch": 4.99267578125,
"grad_norm": 2.8594186305999756,
"learning_rate": 1e-05,
"loss": 0.4411,
"step": 10225
},
{
"epoch": 5.0048828125,
"grad_norm": 3.403825044631958,
"learning_rate": 1e-05,
"loss": 0.3681,
"step": 10250
},
{
"epoch": 5.01708984375,
"grad_norm": 2.896949291229248,
"learning_rate": 1e-05,
"loss": 0.4386,
"step": 10275
},
{
"epoch": 5.029296875,
"grad_norm": 3.3506815433502197,
"learning_rate": 1e-05,
"loss": 0.4113,
"step": 10300
},
{
"epoch": 5.04150390625,
"grad_norm": 4.682892322540283,
"learning_rate": 1e-05,
"loss": 0.4773,
"step": 10325
},
{
"epoch": 5.0537109375,
"grad_norm": 4.2950439453125,
"learning_rate": 1e-05,
"loss": 0.4289,
"step": 10350
},
{
"epoch": 5.06591796875,
"grad_norm": 3.2204432487487793,
"learning_rate": 1e-05,
"loss": 0.423,
"step": 10375
},
{
"epoch": 5.078125,
"grad_norm": 4.998103618621826,
"learning_rate": 1e-05,
"loss": 0.4115,
"step": 10400
},
{
"epoch": 5.09033203125,
"grad_norm": 2.7820868492126465,
"learning_rate": 1e-05,
"loss": 0.4685,
"step": 10425
},
{
"epoch": 5.1025390625,
"grad_norm": 3.236323356628418,
"learning_rate": 1e-05,
"loss": 0.4255,
"step": 10450
},
{
"epoch": 5.11474609375,
"grad_norm": 3.809432029724121,
"learning_rate": 1e-05,
"loss": 0.4964,
"step": 10475
},
{
"epoch": 5.126953125,
"grad_norm": 5.030189514160156,
"learning_rate": 1e-05,
"loss": 0.4664,
"step": 10500
},
{
"epoch": 5.126953125,
"eval_cer": 22.581818181818182,
"eval_loss": 0.4009712040424347,
"eval_normalized_cer": 18.33127825729552,
"eval_runtime": 135.5914,
"eval_samples_per_second": 0.944,
"eval_steps_per_second": 0.059,
"step": 10500
},
{
"epoch": 5.13916015625,
"grad_norm": 2.855731248855591,
"learning_rate": 1e-05,
"loss": 0.4279,
"step": 10525
},
{
"epoch": 5.1513671875,
"grad_norm": 3.264129161834717,
"learning_rate": 1e-05,
"loss": 0.4094,
"step": 10550
},
{
"epoch": 5.16357421875,
"grad_norm": 3.299495220184326,
"learning_rate": 1e-05,
"loss": 0.4343,
"step": 10575
},
{
"epoch": 5.17578125,
"grad_norm": 3.564100503921509,
"learning_rate": 1e-05,
"loss": 0.4514,
"step": 10600
},
{
"epoch": 5.18798828125,
"grad_norm": 2.6773531436920166,
"learning_rate": 1e-05,
"loss": 0.4512,
"step": 10625
},
{
"epoch": 5.2001953125,
"grad_norm": 3.721902370452881,
"learning_rate": 1e-05,
"loss": 0.4326,
"step": 10650
},
{
"epoch": 5.21240234375,
"grad_norm": 2.919142961502075,
"learning_rate": 1e-05,
"loss": 0.4466,
"step": 10675
},
{
"epoch": 5.224609375,
"grad_norm": 3.0286405086517334,
"learning_rate": 1e-05,
"loss": 0.3802,
"step": 10700
},
{
"epoch": 5.23681640625,
"grad_norm": 4.255770206451416,
"learning_rate": 1e-05,
"loss": 0.4289,
"step": 10725
},
{
"epoch": 5.2490234375,
"grad_norm": 4.174068450927734,
"learning_rate": 1e-05,
"loss": 0.4699,
"step": 10750
},
{
"epoch": 5.26123046875,
"grad_norm": 2.8515052795410156,
"learning_rate": 1e-05,
"loss": 0.464,
"step": 10775
},
{
"epoch": 5.2734375,
"grad_norm": 2.487253427505493,
"learning_rate": 1e-05,
"loss": 0.4222,
"step": 10800
},
{
"epoch": 5.28564453125,
"grad_norm": 2.592994213104248,
"learning_rate": 1e-05,
"loss": 0.3916,
"step": 10825
},
{
"epoch": 5.2978515625,
"grad_norm": 3.5928993225097656,
"learning_rate": 1e-05,
"loss": 0.4278,
"step": 10850
},
{
"epoch": 5.31005859375,
"grad_norm": 3.690007448196411,
"learning_rate": 1e-05,
"loss": 0.4369,
"step": 10875
},
{
"epoch": 5.322265625,
"grad_norm": 3.200507879257202,
"learning_rate": 1e-05,
"loss": 0.391,
"step": 10900
},
{
"epoch": 5.33447265625,
"grad_norm": 2.6833174228668213,
"learning_rate": 1e-05,
"loss": 0.4602,
"step": 10925
},
{
"epoch": 5.3466796875,
"grad_norm": 4.44938325881958,
"learning_rate": 1e-05,
"loss": 0.4648,
"step": 10950
},
{
"epoch": 5.35888671875,
"grad_norm": 3.660412073135376,
"learning_rate": 1e-05,
"loss": 0.4193,
"step": 10975
},
{
"epoch": 5.37109375,
"grad_norm": 3.3475520610809326,
"learning_rate": 1e-05,
"loss": 0.4497,
"step": 11000
},
{
"epoch": 5.37109375,
"eval_cer": 23.381818181818183,
"eval_loss": 0.40254130959510803,
"eval_normalized_cer": 18.824496506370735,
"eval_runtime": 134.1905,
"eval_samples_per_second": 0.954,
"eval_steps_per_second": 0.06,
"step": 11000
},
{
"epoch": 5.38330078125,
"grad_norm": 2.6810033321380615,
"learning_rate": 1e-05,
"loss": 0.4332,
"step": 11025
},
{
"epoch": 5.3955078125,
"grad_norm": 3.203681707382202,
"learning_rate": 1e-05,
"loss": 0.4225,
"step": 11050
},
{
"epoch": 5.40771484375,
"grad_norm": 2.916377305984497,
"learning_rate": 1e-05,
"loss": 0.4133,
"step": 11075
},
{
"epoch": 5.419921875,
"grad_norm": 3.2195537090301514,
"learning_rate": 1e-05,
"loss": 0.4842,
"step": 11100
},
{
"epoch": 5.43212890625,
"grad_norm": 3.1595821380615234,
"learning_rate": 1e-05,
"loss": 0.4547,
"step": 11125
},
{
"epoch": 5.4443359375,
"grad_norm": 3.207057237625122,
"learning_rate": 1e-05,
"loss": 0.394,
"step": 11150
},
{
"epoch": 5.45654296875,
"grad_norm": 2.9330296516418457,
"learning_rate": 1e-05,
"loss": 0.3902,
"step": 11175
},
{
"epoch": 5.46875,
"grad_norm": 2.4040415287017822,
"learning_rate": 1e-05,
"loss": 0.4565,
"step": 11200
},
{
"epoch": 5.48095703125,
"grad_norm": 5.465662479400635,
"learning_rate": 1e-05,
"loss": 0.4368,
"step": 11225
},
{
"epoch": 5.4931640625,
"grad_norm": 3.320988178253174,
"learning_rate": 1e-05,
"loss": 0.4346,
"step": 11250
},
{
"epoch": 5.50537109375,
"grad_norm": 2.748600959777832,
"learning_rate": 1e-05,
"loss": 0.4632,
"step": 11275
},
{
"epoch": 5.517578125,
"grad_norm": 2.1236891746520996,
"learning_rate": 1e-05,
"loss": 0.4194,
"step": 11300
},
{
"epoch": 5.52978515625,
"grad_norm": 3.5435776710510254,
"learning_rate": 1e-05,
"loss": 0.4293,
"step": 11325
},
{
"epoch": 5.5419921875,
"grad_norm": 3.9727423191070557,
"learning_rate": 1e-05,
"loss": 0.4373,
"step": 11350
},
{
"epoch": 5.55419921875,
"grad_norm": 4.122025966644287,
"learning_rate": 1e-05,
"loss": 0.3914,
"step": 11375
},
{
"epoch": 5.56640625,
"grad_norm": 3.523620128631592,
"learning_rate": 1e-05,
"loss": 0.4883,
"step": 11400
},
{
"epoch": 5.57861328125,
"grad_norm": 3.5819413661956787,
"learning_rate": 1e-05,
"loss": 0.4592,
"step": 11425
},
{
"epoch": 5.5908203125,
"grad_norm": 3.022115468978882,
"learning_rate": 1e-05,
"loss": 0.4231,
"step": 11450
},
{
"epoch": 5.60302734375,
"grad_norm": 3.9213130474090576,
"learning_rate": 1e-05,
"loss": 0.4391,
"step": 11475
},
{
"epoch": 5.615234375,
"grad_norm": 2.55260968208313,
"learning_rate": 1e-05,
"loss": 0.4375,
"step": 11500
},
{
"epoch": 5.615234375,
"eval_cer": 23.745454545454546,
"eval_loss": 0.3965916633605957,
"eval_normalized_cer": 20.098643649815042,
"eval_runtime": 136.6172,
"eval_samples_per_second": 0.937,
"eval_steps_per_second": 0.059,
"step": 11500
},
{
"epoch": 5.62744140625,
"grad_norm": 4.3812479972839355,
"learning_rate": 1e-05,
"loss": 0.4251,
"step": 11525
},
{
"epoch": 5.6396484375,
"grad_norm": 3.3777589797973633,
"learning_rate": 1e-05,
"loss": 0.4634,
"step": 11550
},
{
"epoch": 5.65185546875,
"grad_norm": 3.418990135192871,
"learning_rate": 1e-05,
"loss": 0.453,
"step": 11575
},
{
"epoch": 5.6640625,
"grad_norm": 3.4627134799957275,
"learning_rate": 1e-05,
"loss": 0.4335,
"step": 11600
},
{
"epoch": 5.67626953125,
"grad_norm": 3.955687999725342,
"learning_rate": 1e-05,
"loss": 0.4648,
"step": 11625
},
{
"epoch": 5.6884765625,
"grad_norm": 3.9896974563598633,
"learning_rate": 1e-05,
"loss": 0.468,
"step": 11650
},
{
"epoch": 5.70068359375,
"grad_norm": 4.082157611846924,
"learning_rate": 1e-05,
"loss": 0.4801,
"step": 11675
},
{
"epoch": 5.712890625,
"grad_norm": 2.509510040283203,
"learning_rate": 1e-05,
"loss": 0.4414,
"step": 11700
},
{
"epoch": 5.72509765625,
"grad_norm": 4.054114818572998,
"learning_rate": 1e-05,
"loss": 0.4341,
"step": 11725
},
{
"epoch": 5.7373046875,
"grad_norm": 3.3360326290130615,
"learning_rate": 1e-05,
"loss": 0.4294,
"step": 11750
},
{
"epoch": 5.74951171875,
"grad_norm": 3.023287773132324,
"learning_rate": 1e-05,
"loss": 0.4177,
"step": 11775
},
{
"epoch": 5.76171875,
"grad_norm": 2.7048487663269043,
"learning_rate": 1e-05,
"loss": 0.4284,
"step": 11800
},
{
"epoch": 5.77392578125,
"grad_norm": 3.7769124507904053,
"learning_rate": 1e-05,
"loss": 0.4297,
"step": 11825
},
{
"epoch": 5.7861328125,
"grad_norm": 2.583249092102051,
"learning_rate": 1e-05,
"loss": 0.4328,
"step": 11850
},
{
"epoch": 5.79833984375,
"grad_norm": 4.198108196258545,
"learning_rate": 1e-05,
"loss": 0.4499,
"step": 11875
},
{
"epoch": 5.810546875,
"grad_norm": 2.8993310928344727,
"learning_rate": 1e-05,
"loss": 0.4081,
"step": 11900
},
{
"epoch": 5.82275390625,
"grad_norm": 3.5087976455688477,
"learning_rate": 1e-05,
"loss": 0.4716,
"step": 11925
},
{
"epoch": 5.8349609375,
"grad_norm": 3.527189254760742,
"learning_rate": 1e-05,
"loss": 0.4229,
"step": 11950
},
{
"epoch": 5.84716796875,
"grad_norm": 2.4347596168518066,
"learning_rate": 1e-05,
"loss": 0.448,
"step": 11975
},
{
"epoch": 5.859375,
"grad_norm": 3.4706249237060547,
"learning_rate": 1e-05,
"loss": 0.4526,
"step": 12000
},
{
"epoch": 5.859375,
"eval_cer": 24.87272727272727,
"eval_loss": 0.3955676555633545,
"eval_normalized_cer": 20.263049732840116,
"eval_runtime": 133.1253,
"eval_samples_per_second": 0.961,
"eval_steps_per_second": 0.06,
"step": 12000
},
{
"epoch": 5.87158203125,
"grad_norm": 3.7815253734588623,
"learning_rate": 1e-05,
"loss": 0.4,
"step": 12025
},
{
"epoch": 5.8837890625,
"grad_norm": 4.76561164855957,
"learning_rate": 1e-05,
"loss": 0.4176,
"step": 12050
},
{
"epoch": 5.89599609375,
"grad_norm": 3.329918622970581,
"learning_rate": 1e-05,
"loss": 0.4232,
"step": 12075
},
{
"epoch": 5.908203125,
"grad_norm": 4.160305023193359,
"learning_rate": 1e-05,
"loss": 0.4123,
"step": 12100
},
{
"epoch": 5.92041015625,
"grad_norm": 3.11075758934021,
"learning_rate": 1e-05,
"loss": 0.4468,
"step": 12125
},
{
"epoch": 5.9326171875,
"grad_norm": 3.1409554481506348,
"learning_rate": 1e-05,
"loss": 0.4624,
"step": 12150
},
{
"epoch": 5.94482421875,
"grad_norm": 2.9979705810546875,
"learning_rate": 1e-05,
"loss": 0.4369,
"step": 12175
},
{
"epoch": 5.95703125,
"grad_norm": 3.538670301437378,
"learning_rate": 1e-05,
"loss": 0.4299,
"step": 12200
},
{
"epoch": 5.96923828125,
"grad_norm": 3.377985954284668,
"learning_rate": 1e-05,
"loss": 0.3944,
"step": 12225
},
{
"epoch": 5.9814453125,
"grad_norm": 3.6021058559417725,
"learning_rate": 1e-05,
"loss": 0.4314,
"step": 12250
},
{
"epoch": 5.99365234375,
"grad_norm": 3.2901623249053955,
"learning_rate": 1e-05,
"loss": 0.4104,
"step": 12275
},
{
"epoch": 6.005859375,
"grad_norm": 4.384257793426514,
"learning_rate": 1e-05,
"loss": 0.5273,
"step": 12300
},
{
"epoch": 6.01806640625,
"grad_norm": 3.564580202102661,
"learning_rate": 1e-05,
"loss": 0.4292,
"step": 12325
},
{
"epoch": 6.0302734375,
"grad_norm": 3.4145617485046387,
"learning_rate": 1e-05,
"loss": 0.4183,
"step": 12350
},
{
"epoch": 6.04248046875,
"grad_norm": 3.1783013343811035,
"learning_rate": 1e-05,
"loss": 0.4193,
"step": 12375
},
{
"epoch": 6.0546875,
"grad_norm": 3.3418772220611572,
"learning_rate": 1e-05,
"loss": 0.4635,
"step": 12400
},
{
"epoch": 6.06689453125,
"grad_norm": 3.057255744934082,
"learning_rate": 1e-05,
"loss": 0.4068,
"step": 12425
},
{
"epoch": 6.0791015625,
"grad_norm": 3.5321204662323,
"learning_rate": 1e-05,
"loss": 0.3614,
"step": 12450
},
{
"epoch": 6.09130859375,
"grad_norm": 3.536223888397217,
"learning_rate": 1e-05,
"loss": 0.4195,
"step": 12475
},
{
"epoch": 6.103515625,
"grad_norm": 2.386425018310547,
"learning_rate": 1e-05,
"loss": 0.4545,
"step": 12500
},
{
"epoch": 6.103515625,
"eval_cer": 26.47272727272727,
"eval_loss": 0.38523948192596436,
"eval_normalized_cer": 20.550760378133994,
"eval_runtime": 117.0609,
"eval_samples_per_second": 1.093,
"eval_steps_per_second": 0.068,
"step": 12500
},
{
"epoch": 6.11572265625,
"grad_norm": 2.7935492992401123,
"learning_rate": 1e-05,
"loss": 0.4149,
"step": 12525
},
{
"epoch": 6.1279296875,
"grad_norm": 3.3430776596069336,
"learning_rate": 1e-05,
"loss": 0.4718,
"step": 12550
},
{
"epoch": 6.14013671875,
"grad_norm": 2.8501319885253906,
"learning_rate": 1e-05,
"loss": 0.467,
"step": 12575
},
{
"epoch": 6.15234375,
"grad_norm": 3.3480148315429688,
"learning_rate": 1e-05,
"loss": 0.4245,
"step": 12600
},
{
"epoch": 6.16455078125,
"grad_norm": 3.101698160171509,
"learning_rate": 1e-05,
"loss": 0.4185,
"step": 12625
},
{
"epoch": 6.1767578125,
"grad_norm": 2.7815115451812744,
"learning_rate": 1e-05,
"loss": 0.446,
"step": 12650
},
{
"epoch": 6.18896484375,
"grad_norm": 2.8936548233032227,
"learning_rate": 1e-05,
"loss": 0.4136,
"step": 12675
},
{
"epoch": 6.201171875,
"grad_norm": 4.183928966522217,
"learning_rate": 1e-05,
"loss": 0.4384,
"step": 12700
},
{
"epoch": 6.21337890625,
"grad_norm": 3.2840652465820312,
"learning_rate": 1e-05,
"loss": 0.4782,
"step": 12725
},
{
"epoch": 6.2255859375,
"grad_norm": 3.9204938411712646,
"learning_rate": 1e-05,
"loss": 0.432,
"step": 12750
},
{
"epoch": 6.23779296875,
"grad_norm": 4.316317081451416,
"learning_rate": 1e-05,
"loss": 0.4644,
"step": 12775
},
{
"epoch": 6.25,
"grad_norm": 3.1822116374969482,
"learning_rate": 1e-05,
"loss": 0.4331,
"step": 12800
},
{
"epoch": 6.26220703125,
"grad_norm": 2.9394965171813965,
"learning_rate": 1e-05,
"loss": 0.4263,
"step": 12825
},
{
"epoch": 6.2744140625,
"grad_norm": 3.419806718826294,
"learning_rate": 1e-05,
"loss": 0.4219,
"step": 12850
},
{
"epoch": 6.28662109375,
"grad_norm": 3.1678640842437744,
"learning_rate": 1e-05,
"loss": 0.4425,
"step": 12875
},
{
"epoch": 6.298828125,
"grad_norm": 4.1609697341918945,
"learning_rate": 1e-05,
"loss": 0.4355,
"step": 12900
},
{
"epoch": 6.31103515625,
"grad_norm": 3.895420789718628,
"learning_rate": 1e-05,
"loss": 0.4633,
"step": 12925
},
{
"epoch": 6.3232421875,
"grad_norm": 3.244763135910034,
"learning_rate": 1e-05,
"loss": 0.4219,
"step": 12950
},
{
"epoch": 6.33544921875,
"grad_norm": 4.2628984451293945,
"learning_rate": 1e-05,
"loss": 0.432,
"step": 12975
},
{
"epoch": 6.34765625,
"grad_norm": 4.046095848083496,
"learning_rate": 1e-05,
"loss": 0.4199,
"step": 13000
},
{
"epoch": 6.34765625,
"eval_cer": 20.909090909090907,
"eval_loss": 0.38916099071502686,
"eval_normalized_cer": 16.31730374023839,
"eval_runtime": 128.9363,
"eval_samples_per_second": 0.993,
"eval_steps_per_second": 0.062,
"step": 13000
},
{
"epoch": 6.35986328125,
"grad_norm": 3.0475289821624756,
"learning_rate": 1e-05,
"loss": 0.4105,
"step": 13025
},
{
"epoch": 6.3720703125,
"grad_norm": 3.288393974304199,
"learning_rate": 1e-05,
"loss": 0.4627,
"step": 13050
},
{
"epoch": 6.38427734375,
"grad_norm": 4.572826862335205,
"learning_rate": 1e-05,
"loss": 0.4259,
"step": 13075
},
{
"epoch": 6.396484375,
"grad_norm": 3.2055158615112305,
"learning_rate": 1e-05,
"loss": 0.4124,
"step": 13100
},
{
"epoch": 6.40869140625,
"grad_norm": 3.237438917160034,
"learning_rate": 1e-05,
"loss": 0.4525,
"step": 13125
},
{
"epoch": 6.4208984375,
"grad_norm": 3.7672817707061768,
"learning_rate": 1e-05,
"loss": 0.4218,
"step": 13150
},
{
"epoch": 6.43310546875,
"grad_norm": 2.8958332538604736,
"learning_rate": 1e-05,
"loss": 0.4501,
"step": 13175
},
{
"epoch": 6.4453125,
"grad_norm": 3.3539748191833496,
"learning_rate": 1e-05,
"loss": 0.4294,
"step": 13200
},
{
"epoch": 6.45751953125,
"grad_norm": 3.745314836502075,
"learning_rate": 1e-05,
"loss": 0.4038,
"step": 13225
},
{
"epoch": 6.4697265625,
"grad_norm": 4.028524398803711,
"learning_rate": 1e-05,
"loss": 0.4313,
"step": 13250
},
{
"epoch": 6.48193359375,
"grad_norm": 3.058047294616699,
"learning_rate": 1e-05,
"loss": 0.4215,
"step": 13275
},
{
"epoch": 6.494140625,
"grad_norm": 2.5536253452301025,
"learning_rate": 1e-05,
"loss": 0.4306,
"step": 13300
},
{
"epoch": 6.50634765625,
"grad_norm": 2.894327402114868,
"learning_rate": 1e-05,
"loss": 0.4211,
"step": 13325
},
{
"epoch": 6.5185546875,
"grad_norm": 4.001802444458008,
"learning_rate": 1e-05,
"loss": 0.4212,
"step": 13350
},
{
"epoch": 6.53076171875,
"grad_norm": 3.1187326908111572,
"learning_rate": 1e-05,
"loss": 0.4371,
"step": 13375
},
{
"epoch": 6.54296875,
"grad_norm": 2.8355751037597656,
"learning_rate": 1e-05,
"loss": 0.4356,
"step": 13400
},
{
"epoch": 6.55517578125,
"grad_norm": 4.038667678833008,
"learning_rate": 1e-05,
"loss": 0.4009,
"step": 13425
},
{
"epoch": 6.5673828125,
"grad_norm": 3.5054690837860107,
"learning_rate": 1e-05,
"loss": 0.4561,
"step": 13450
},
{
"epoch": 6.57958984375,
"grad_norm": 3.1248323917388916,
"learning_rate": 1e-05,
"loss": 0.4244,
"step": 13475
},
{
"epoch": 6.591796875,
"grad_norm": 3.1242165565490723,
"learning_rate": 1e-05,
"loss": 0.4456,
"step": 13500
},
{
"epoch": 6.591796875,
"eval_cer": 22.545454545454547,
"eval_loss": 0.3937221169471741,
"eval_normalized_cer": 17.79695848746404,
"eval_runtime": 131.9352,
"eval_samples_per_second": 0.97,
"eval_steps_per_second": 0.061,
"step": 13500
},
{
"epoch": 6.60400390625,
"grad_norm": 2.5615017414093018,
"learning_rate": 1e-05,
"loss": 0.4451,
"step": 13525
},
{
"epoch": 6.6162109375,
"grad_norm": 3.3750381469726562,
"learning_rate": 1e-05,
"loss": 0.4233,
"step": 13550
},
{
"epoch": 6.62841796875,
"grad_norm": 3.462449073791504,
"learning_rate": 1e-05,
"loss": 0.4426,
"step": 13575
},
{
"epoch": 6.640625,
"grad_norm": 2.785858154296875,
"learning_rate": 1e-05,
"loss": 0.4073,
"step": 13600
},
{
"epoch": 6.65283203125,
"grad_norm": 2.7007555961608887,
"learning_rate": 1e-05,
"loss": 0.438,
"step": 13625
},
{
"epoch": 6.6650390625,
"grad_norm": 2.3689684867858887,
"learning_rate": 1e-05,
"loss": 0.4226,
"step": 13650
},
{
"epoch": 6.67724609375,
"grad_norm": 3.3723831176757812,
"learning_rate": 1e-05,
"loss": 0.4279,
"step": 13675
},
{
"epoch": 6.689453125,
"grad_norm": 2.9226441383361816,
"learning_rate": 1e-05,
"loss": 0.4242,
"step": 13700
},
{
"epoch": 6.70166015625,
"grad_norm": 2.7876479625701904,
"learning_rate": 1e-05,
"loss": 0.4328,
"step": 13725
},
{
"epoch": 6.7138671875,
"grad_norm": 2.7610867023468018,
"learning_rate": 1e-05,
"loss": 0.4412,
"step": 13750
},
{
"epoch": 6.72607421875,
"grad_norm": 2.8302724361419678,
"learning_rate": 1e-05,
"loss": 0.4526,
"step": 13775
},
{
"epoch": 6.73828125,
"grad_norm": 3.1445977687835693,
"learning_rate": 1e-05,
"loss": 0.4544,
"step": 13800
},
{
"epoch": 6.75048828125,
"grad_norm": 3.8864002227783203,
"learning_rate": 1e-05,
"loss": 0.3965,
"step": 13825
},
{
"epoch": 6.7626953125,
"grad_norm": 3.2896971702575684,
"learning_rate": 1e-05,
"loss": 0.4182,
"step": 13850
},
{
"epoch": 6.77490234375,
"grad_norm": 4.539292335510254,
"learning_rate": 1e-05,
"loss": 0.4413,
"step": 13875
},
{
"epoch": 6.787109375,
"grad_norm": 3.2103347778320312,
"learning_rate": 1e-05,
"loss": 0.4346,
"step": 13900
},
{
"epoch": 6.79931640625,
"grad_norm": 3.929469585418701,
"learning_rate": 1e-05,
"loss": 0.4181,
"step": 13925
},
{
"epoch": 6.8115234375,
"grad_norm": 3.3086280822753906,
"learning_rate": 1e-05,
"loss": 0.3981,
"step": 13950
},
{
"epoch": 6.82373046875,
"grad_norm": 3.101243734359741,
"learning_rate": 1e-05,
"loss": 0.503,
"step": 13975
},
{
"epoch": 6.8359375,
"grad_norm": 3.602478265762329,
"learning_rate": 1e-05,
"loss": 0.4186,
"step": 14000
},
{
"epoch": 6.8359375,
"eval_cer": 22.472727272727273,
"eval_loss": 0.3899889588356018,
"eval_normalized_cer": 18.33127825729552,
"eval_runtime": 144.5925,
"eval_samples_per_second": 0.885,
"eval_steps_per_second": 0.055,
"step": 14000
},
{
"epoch": 6.84814453125,
"grad_norm": 3.0590033531188965,
"learning_rate": 1e-05,
"loss": 0.4268,
"step": 14025
},
{
"epoch": 6.8603515625,
"grad_norm": 4.508790969848633,
"learning_rate": 1e-05,
"loss": 0.4063,
"step": 14050
},
{
"epoch": 6.87255859375,
"grad_norm": 3.374329090118408,
"learning_rate": 1e-05,
"loss": 0.4482,
"step": 14075
},
{
"epoch": 6.884765625,
"grad_norm": 2.569153308868408,
"learning_rate": 1e-05,
"loss": 0.4477,
"step": 14100
},
{
"epoch": 6.89697265625,
"grad_norm": 2.5716049671173096,
"learning_rate": 1e-05,
"loss": 0.4282,
"step": 14125
},
{
"epoch": 6.9091796875,
"grad_norm": 2.990652561187744,
"learning_rate": 1e-05,
"loss": 0.4409,
"step": 14150
},
{
"epoch": 6.92138671875,
"grad_norm": 2.772998094558716,
"learning_rate": 1e-05,
"loss": 0.3622,
"step": 14175
},
{
"epoch": 6.93359375,
"grad_norm": 3.0832324028015137,
"learning_rate": 1e-05,
"loss": 0.4017,
"step": 14200
},
{
"epoch": 6.94580078125,
"grad_norm": 2.2825822830200195,
"learning_rate": 1e-05,
"loss": 0.4341,
"step": 14225
},
{
"epoch": 6.9580078125,
"grad_norm": 2.8773045539855957,
"learning_rate": 1e-05,
"loss": 0.4512,
"step": 14250
},
{
"epoch": 6.97021484375,
"grad_norm": 2.9340527057647705,
"learning_rate": 1e-05,
"loss": 0.4004,
"step": 14275
},
{
"epoch": 6.982421875,
"grad_norm": 3.3255691528320312,
"learning_rate": 1e-05,
"loss": 0.3775,
"step": 14300
},
{
"epoch": 6.99462890625,
"grad_norm": 3.9721550941467285,
"learning_rate": 1e-05,
"loss": 0.4239,
"step": 14325
},
{
"epoch": 7.0068359375,
"grad_norm": 2.075686454772949,
"learning_rate": 1e-05,
"loss": 0.3713,
"step": 14350
},
{
"epoch": 7.01904296875,
"grad_norm": 3.62715744972229,
"learning_rate": 1e-05,
"loss": 0.4379,
"step": 14375
},
{
"epoch": 7.03125,
"grad_norm": 2.931847333908081,
"learning_rate": 1e-05,
"loss": 0.3971,
"step": 14400
},
{
"epoch": 7.04345703125,
"grad_norm": 3.470655679702759,
"learning_rate": 1e-05,
"loss": 0.4529,
"step": 14425
},
{
"epoch": 7.0556640625,
"grad_norm": 3.3625166416168213,
"learning_rate": 1e-05,
"loss": 0.4197,
"step": 14450
},
{
"epoch": 7.06787109375,
"grad_norm": 3.200965404510498,
"learning_rate": 1e-05,
"loss": 0.4553,
"step": 14475
},
{
"epoch": 7.080078125,
"grad_norm": 3.83722186088562,
"learning_rate": 1e-05,
"loss": 0.4601,
"step": 14500
},
{
"epoch": 7.080078125,
"eval_cer": 20.727272727272727,
"eval_loss": 0.3850056231021881,
"eval_normalized_cer": 16.399506781750926,
"eval_runtime": 135.6446,
"eval_samples_per_second": 0.944,
"eval_steps_per_second": 0.059,
"step": 14500
},
{
"epoch": 7.09228515625,
"grad_norm": 4.523788928985596,
"learning_rate": 1e-05,
"loss": 0.4101,
"step": 14525
},
{
"epoch": 7.1044921875,
"grad_norm": 3.117316722869873,
"learning_rate": 1e-05,
"loss": 0.3924,
"step": 14550
},
{
"epoch": 7.11669921875,
"grad_norm": 2.9018518924713135,
"learning_rate": 1e-05,
"loss": 0.421,
"step": 14575
},
{
"epoch": 7.12890625,
"grad_norm": 4.131909370422363,
"learning_rate": 1e-05,
"loss": 0.4576,
"step": 14600
},
{
"epoch": 7.14111328125,
"grad_norm": 3.206359386444092,
"learning_rate": 1e-05,
"loss": 0.4068,
"step": 14625
},
{
"epoch": 7.1533203125,
"grad_norm": 4.659736156463623,
"learning_rate": 1e-05,
"loss": 0.3906,
"step": 14650
},
{
"epoch": 7.16552734375,
"grad_norm": 3.376612901687622,
"learning_rate": 1e-05,
"loss": 0.4197,
"step": 14675
},
{
"epoch": 7.177734375,
"grad_norm": 3.268672227859497,
"learning_rate": 1e-05,
"loss": 0.4122,
"step": 14700
},
{
"epoch": 7.18994140625,
"grad_norm": 3.0294389724731445,
"learning_rate": 1e-05,
"loss": 0.4154,
"step": 14725
},
{
"epoch": 7.2021484375,
"grad_norm": 3.547778367996216,
"learning_rate": 1e-05,
"loss": 0.4375,
"step": 14750
},
{
"epoch": 7.21435546875,
"grad_norm": 2.7753546237945557,
"learning_rate": 1e-05,
"loss": 0.4212,
"step": 14775
},
{
"epoch": 7.2265625,
"grad_norm": 3.724691152572632,
"learning_rate": 1e-05,
"loss": 0.4074,
"step": 14800
},
{
"epoch": 7.23876953125,
"grad_norm": 2.489899158477783,
"learning_rate": 1e-05,
"loss": 0.4094,
"step": 14825
},
{
"epoch": 7.2509765625,
"grad_norm": 3.542677879333496,
"learning_rate": 1e-05,
"loss": 0.4034,
"step": 14850
},
{
"epoch": 7.26318359375,
"grad_norm": 3.15748929977417,
"learning_rate": 1e-05,
"loss": 0.4553,
"step": 14875
},
{
"epoch": 7.275390625,
"grad_norm": 3.477710008621216,
"learning_rate": 1e-05,
"loss": 0.4394,
"step": 14900
},
{
"epoch": 7.28759765625,
"grad_norm": 4.362137794494629,
"learning_rate": 1e-05,
"loss": 0.4311,
"step": 14925
},
{
"epoch": 7.2998046875,
"grad_norm": 2.3837854862213135,
"learning_rate": 1e-05,
"loss": 0.4179,
"step": 14950
},
{
"epoch": 7.31201171875,
"grad_norm": 3.1717562675476074,
"learning_rate": 1e-05,
"loss": 0.4504,
"step": 14975
},
{
"epoch": 7.32421875,
"grad_norm": 2.988471269607544,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 15000
},
{
"epoch": 7.32421875,
"eval_cer": 23.599999999999998,
"eval_loss": 0.38635486364364624,
"eval_normalized_cer": 20.098643649815042,
"eval_runtime": 132.5727,
"eval_samples_per_second": 0.966,
"eval_steps_per_second": 0.06,
"step": 15000
},
{
"epoch": 7.33642578125,
"grad_norm": 3.9990575313568115,
"learning_rate": 1e-05,
"loss": 0.4263,
"step": 15025
},
{
"epoch": 7.3486328125,
"grad_norm": 2.6988320350646973,
"learning_rate": 1e-05,
"loss": 0.4311,
"step": 15050
},
{
"epoch": 7.36083984375,
"grad_norm": 3.537297487258911,
"learning_rate": 1e-05,
"loss": 0.4552,
"step": 15075
},
{
"epoch": 7.373046875,
"grad_norm": 2.706217050552368,
"learning_rate": 1e-05,
"loss": 0.4365,
"step": 15100
},
{
"epoch": 7.38525390625,
"grad_norm": 3.2613677978515625,
"learning_rate": 1e-05,
"loss": 0.4512,
"step": 15125
},
{
"epoch": 7.3974609375,
"grad_norm": 3.039867877960205,
"learning_rate": 1e-05,
"loss": 0.4485,
"step": 15150
},
{
"epoch": 7.40966796875,
"grad_norm": 3.6644127368927,
"learning_rate": 1e-05,
"loss": 0.4218,
"step": 15175
},
{
"epoch": 7.421875,
"grad_norm": 3.3461201190948486,
"learning_rate": 1e-05,
"loss": 0.4292,
"step": 15200
},
{
"epoch": 7.43408203125,
"grad_norm": 2.8256707191467285,
"learning_rate": 1e-05,
"loss": 0.4205,
"step": 15225
},
{
"epoch": 7.4462890625,
"grad_norm": 3.3773632049560547,
"learning_rate": 1e-05,
"loss": 0.4155,
"step": 15250
},
{
"epoch": 7.45849609375,
"grad_norm": 2.6571528911590576,
"learning_rate": 1e-05,
"loss": 0.4567,
"step": 15275
},
{
"epoch": 7.470703125,
"grad_norm": 3.0483086109161377,
"learning_rate": 1e-05,
"loss": 0.4293,
"step": 15300
},
{
"epoch": 7.48291015625,
"grad_norm": 3.6026933193206787,
"learning_rate": 1e-05,
"loss": 0.426,
"step": 15325
},
{
"epoch": 7.4951171875,
"grad_norm": 4.094547748565674,
"learning_rate": 1e-05,
"loss": 0.4562,
"step": 15350
},
{
"epoch": 7.50732421875,
"grad_norm": 3.6871438026428223,
"learning_rate": 1e-05,
"loss": 0.4438,
"step": 15375
},
{
"epoch": 7.51953125,
"grad_norm": 3.5841362476348877,
"learning_rate": 1e-05,
"loss": 0.3874,
"step": 15400
},
{
"epoch": 7.53173828125,
"grad_norm": 3.3375258445739746,
"learning_rate": 1e-05,
"loss": 0.4234,
"step": 15425
},
{
"epoch": 7.5439453125,
"grad_norm": 4.966141223907471,
"learning_rate": 1e-05,
"loss": 0.4329,
"step": 15450
},
{
"epoch": 7.55615234375,
"grad_norm": 2.2988944053649902,
"learning_rate": 1e-05,
"loss": 0.4377,
"step": 15475
},
{
"epoch": 7.568359375,
"grad_norm": 3.4808993339538574,
"learning_rate": 1e-05,
"loss": 0.4375,
"step": 15500
},
{
"epoch": 7.568359375,
"eval_cer": 23.854545454545452,
"eval_loss": 0.38098400831222534,
"eval_normalized_cer": 19.399917796958487,
"eval_runtime": 137.4032,
"eval_samples_per_second": 0.932,
"eval_steps_per_second": 0.058,
"step": 15500
},
{
"epoch": 7.58056640625,
"grad_norm": 4.054482936859131,
"learning_rate": 1e-05,
"loss": 0.3687,
"step": 15525
},
{
"epoch": 7.5927734375,
"grad_norm": 2.874267578125,
"learning_rate": 1e-05,
"loss": 0.4033,
"step": 15550
},
{
"epoch": 7.60498046875,
"grad_norm": 2.669652223587036,
"learning_rate": 1e-05,
"loss": 0.4262,
"step": 15575
},
{
"epoch": 7.6171875,
"grad_norm": 3.2882497310638428,
"learning_rate": 1e-05,
"loss": 0.4163,
"step": 15600
},
{
"epoch": 7.62939453125,
"grad_norm": 4.546947479248047,
"learning_rate": 1e-05,
"loss": 0.4668,
"step": 15625
},
{
"epoch": 7.6416015625,
"grad_norm": 2.2762033939361572,
"learning_rate": 1e-05,
"loss": 0.4313,
"step": 15650
},
{
"epoch": 7.65380859375,
"grad_norm": 3.4660661220550537,
"learning_rate": 1e-05,
"loss": 0.4181,
"step": 15675
},
{
"epoch": 7.666015625,
"grad_norm": 3.2215335369110107,
"learning_rate": 1e-05,
"loss": 0.4114,
"step": 15700
},
{
"epoch": 7.67822265625,
"grad_norm": 3.1161818504333496,
"learning_rate": 1e-05,
"loss": 0.4599,
"step": 15725
},
{
"epoch": 7.6904296875,
"grad_norm": 3.0069620609283447,
"learning_rate": 1e-05,
"loss": 0.4498,
"step": 15750
},
{
"epoch": 7.70263671875,
"grad_norm": 2.84161114692688,
"learning_rate": 1e-05,
"loss": 0.4252,
"step": 15775
},
{
"epoch": 7.71484375,
"grad_norm": 4.6650590896606445,
"learning_rate": 1e-05,
"loss": 0.4297,
"step": 15800
},
{
"epoch": 7.72705078125,
"grad_norm": 2.6274569034576416,
"learning_rate": 1e-05,
"loss": 0.3753,
"step": 15825
},
{
"epoch": 7.7392578125,
"grad_norm": 3.5292246341705322,
"learning_rate": 1e-05,
"loss": 0.4168,
"step": 15850
},
{
"epoch": 7.75146484375,
"grad_norm": 2.8617982864379883,
"learning_rate": 1e-05,
"loss": 0.4809,
"step": 15875
},
{
"epoch": 7.763671875,
"grad_norm": 3.669074773788452,
"learning_rate": 1e-05,
"loss": 0.4613,
"step": 15900
},
{
"epoch": 7.77587890625,
"grad_norm": 2.994079113006592,
"learning_rate": 1e-05,
"loss": 0.3902,
"step": 15925
},
{
"epoch": 7.7880859375,
"grad_norm": 2.6492769718170166,
"learning_rate": 1e-05,
"loss": 0.3756,
"step": 15950
},
{
"epoch": 7.80029296875,
"grad_norm": 3.2743895053863525,
"learning_rate": 1e-05,
"loss": 0.4476,
"step": 15975
},
{
"epoch": 7.8125,
"grad_norm": 3.279031276702881,
"learning_rate": 1e-05,
"loss": 0.4563,
"step": 16000
},
{
"epoch": 7.8125,
"eval_cer": 23.381818181818183,
"eval_loss": 0.38140711188316345,
"eval_normalized_cer": 18.90669954788327,
"eval_runtime": 135.0002,
"eval_samples_per_second": 0.948,
"eval_steps_per_second": 0.059,
"step": 16000
},
{
"epoch": 7.82470703125,
"grad_norm": 2.7851359844207764,
"learning_rate": 1e-05,
"loss": 0.4344,
"step": 16025
},
{
"epoch": 7.8369140625,
"grad_norm": 3.965728282928467,
"learning_rate": 1e-05,
"loss": 0.4328,
"step": 16050
},
{
"epoch": 7.84912109375,
"grad_norm": 4.293801307678223,
"learning_rate": 1e-05,
"loss": 0.4593,
"step": 16075
},
{
"epoch": 7.861328125,
"grad_norm": 3.0265841484069824,
"learning_rate": 1e-05,
"loss": 0.4039,
"step": 16100
},
{
"epoch": 7.87353515625,
"grad_norm": 3.9985835552215576,
"learning_rate": 1e-05,
"loss": 0.3911,
"step": 16125
},
{
"epoch": 7.8857421875,
"grad_norm": 4.016017436981201,
"learning_rate": 1e-05,
"loss": 0.458,
"step": 16150
},
{
"epoch": 7.89794921875,
"grad_norm": 3.816105842590332,
"learning_rate": 1e-05,
"loss": 0.4125,
"step": 16175
},
{
"epoch": 7.91015625,
"grad_norm": 2.5083696842193604,
"learning_rate": 1e-05,
"loss": 0.3769,
"step": 16200
},
{
"epoch": 7.92236328125,
"grad_norm": 3.251554012298584,
"learning_rate": 1e-05,
"loss": 0.4705,
"step": 16225
},
{
"epoch": 7.9345703125,
"grad_norm": 3.370077610015869,
"learning_rate": 1e-05,
"loss": 0.4565,
"step": 16250
},
{
"epoch": 7.94677734375,
"grad_norm": 2.996699810028076,
"learning_rate": 1e-05,
"loss": 0.461,
"step": 16275
},
{
"epoch": 7.958984375,
"grad_norm": 4.30794620513916,
"learning_rate": 1e-05,
"loss": 0.4035,
"step": 16300
},
{
"epoch": 7.97119140625,
"grad_norm": 3.3062832355499268,
"learning_rate": 1e-05,
"loss": 0.4067,
"step": 16325
},
{
"epoch": 7.9833984375,
"grad_norm": 2.4719371795654297,
"learning_rate": 1e-05,
"loss": 0.4868,
"step": 16350
},
{
"epoch": 7.99560546875,
"grad_norm": 3.1429994106292725,
"learning_rate": 1e-05,
"loss": 0.4341,
"step": 16375
},
{
"epoch": 8.0078125,
"grad_norm": 4.408067226409912,
"learning_rate": 1e-05,
"loss": 0.4874,
"step": 16400
},
{
"epoch": 8.02001953125,
"grad_norm": 3.3049044609069824,
"learning_rate": 1e-05,
"loss": 0.4783,
"step": 16425
},
{
"epoch": 8.0322265625,
"grad_norm": 2.1289191246032715,
"learning_rate": 1e-05,
"loss": 0.3918,
"step": 16450
},
{
"epoch": 8.04443359375,
"grad_norm": 3.2784934043884277,
"learning_rate": 1e-05,
"loss": 0.3953,
"step": 16475
},
{
"epoch": 8.056640625,
"grad_norm": 2.8000524044036865,
"learning_rate": 1e-05,
"loss": 0.4145,
"step": 16500
},
{
"epoch": 8.056640625,
"eval_cer": 19.236363636363638,
"eval_loss": 0.3803868889808655,
"eval_normalized_cer": 15.207562679819153,
"eval_runtime": 142.7418,
"eval_samples_per_second": 0.897,
"eval_steps_per_second": 0.056,
"step": 16500
},
{
"epoch": 8.06884765625,
"grad_norm": 4.039844989776611,
"learning_rate": 1e-05,
"loss": 0.4458,
"step": 16525
},
{
"epoch": 8.0810546875,
"grad_norm": 3.5788333415985107,
"learning_rate": 1e-05,
"loss": 0.4211,
"step": 16550
},
{
"epoch": 8.09326171875,
"grad_norm": 2.7754452228546143,
"learning_rate": 1e-05,
"loss": 0.3947,
"step": 16575
},
{
"epoch": 8.10546875,
"grad_norm": 3.6685307025909424,
"learning_rate": 1e-05,
"loss": 0.4371,
"step": 16600
},
{
"epoch": 8.11767578125,
"grad_norm": 4.283354759216309,
"learning_rate": 1e-05,
"loss": 0.4196,
"step": 16625
},
{
"epoch": 8.1298828125,
"grad_norm": 3.37106990814209,
"learning_rate": 1e-05,
"loss": 0.4286,
"step": 16650
},
{
"epoch": 8.14208984375,
"grad_norm": 3.3839213848114014,
"learning_rate": 1e-05,
"loss": 0.4081,
"step": 16675
},
{
"epoch": 8.154296875,
"grad_norm": 2.9974148273468018,
"learning_rate": 1e-05,
"loss": 0.4218,
"step": 16700
},
{
"epoch": 8.16650390625,
"grad_norm": 2.861589193344116,
"learning_rate": 1e-05,
"loss": 0.3866,
"step": 16725
},
{
"epoch": 8.1787109375,
"grad_norm": 3.7804067134857178,
"learning_rate": 1e-05,
"loss": 0.402,
"step": 16750
},
{
"epoch": 8.19091796875,
"grad_norm": 4.124424457550049,
"learning_rate": 1e-05,
"loss": 0.4198,
"step": 16775
},
{
"epoch": 8.203125,
"grad_norm": 3.785266876220703,
"learning_rate": 1e-05,
"loss": 0.4744,
"step": 16800
},
{
"epoch": 8.21533203125,
"grad_norm": 2.816866636276245,
"learning_rate": 1e-05,
"loss": 0.4509,
"step": 16825
},
{
"epoch": 8.2275390625,
"grad_norm": 2.5543909072875977,
"learning_rate": 1e-05,
"loss": 0.4379,
"step": 16850
},
{
"epoch": 8.23974609375,
"grad_norm": 3.561476707458496,
"learning_rate": 1e-05,
"loss": 0.4109,
"step": 16875
},
{
"epoch": 8.251953125,
"grad_norm": 2.819086790084839,
"learning_rate": 1e-05,
"loss": 0.4265,
"step": 16900
},
{
"epoch": 8.26416015625,
"grad_norm": 3.327193021774292,
"learning_rate": 1e-05,
"loss": 0.3561,
"step": 16925
},
{
"epoch": 8.2763671875,
"grad_norm": 2.7188267707824707,
"learning_rate": 1e-05,
"loss": 0.405,
"step": 16950
},
{
"epoch": 8.28857421875,
"grad_norm": 4.3515543937683105,
"learning_rate": 1e-05,
"loss": 0.4097,
"step": 16975
},
{
"epoch": 8.30078125,
"grad_norm": 3.3475663661956787,
"learning_rate": 1e-05,
"loss": 0.3892,
"step": 17000
},
{
"epoch": 8.30078125,
"eval_cer": 22.581818181818182,
"eval_loss": 0.38389354944229126,
"eval_normalized_cer": 18.16687217427045,
"eval_runtime": 140.8271,
"eval_samples_per_second": 0.909,
"eval_steps_per_second": 0.057,
"step": 17000
},
{
"epoch": 8.31298828125,
"grad_norm": 3.1067376136779785,
"learning_rate": 1e-05,
"loss": 0.402,
"step": 17025
},
{
"epoch": 8.3251953125,
"grad_norm": 4.873087406158447,
"learning_rate": 1e-05,
"loss": 0.4059,
"step": 17050
},
{
"epoch": 8.33740234375,
"grad_norm": 3.115588665008545,
"learning_rate": 1e-05,
"loss": 0.4014,
"step": 17075
},
{
"epoch": 8.349609375,
"grad_norm": 4.50831413269043,
"learning_rate": 1e-05,
"loss": 0.4337,
"step": 17100
},
{
"epoch": 8.36181640625,
"grad_norm": 2.399965763092041,
"learning_rate": 1e-05,
"loss": 0.4034,
"step": 17125
},
{
"epoch": 8.3740234375,
"grad_norm": 3.1204044818878174,
"learning_rate": 1e-05,
"loss": 0.4344,
"step": 17150
},
{
"epoch": 8.38623046875,
"grad_norm": 3.457709789276123,
"learning_rate": 1e-05,
"loss": 0.4311,
"step": 17175
},
{
"epoch": 8.3984375,
"grad_norm": 2.5090980529785156,
"learning_rate": 1e-05,
"loss": 0.4631,
"step": 17200
},
{
"epoch": 8.41064453125,
"grad_norm": 2.9333646297454834,
"learning_rate": 1e-05,
"loss": 0.4329,
"step": 17225
},
{
"epoch": 8.4228515625,
"grad_norm": 2.8422629833221436,
"learning_rate": 1e-05,
"loss": 0.3823,
"step": 17250
},
{
"epoch": 8.43505859375,
"grad_norm": 2.7649292945861816,
"learning_rate": 1e-05,
"loss": 0.4538,
"step": 17275
},
{
"epoch": 8.447265625,
"grad_norm": 3.3747799396514893,
"learning_rate": 1e-05,
"loss": 0.416,
"step": 17300
},
{
"epoch": 8.45947265625,
"grad_norm": 4.490537643432617,
"learning_rate": 1e-05,
"loss": 0.3744,
"step": 17325
},
{
"epoch": 8.4716796875,
"grad_norm": 3.027010202407837,
"learning_rate": 1e-05,
"loss": 0.3874,
"step": 17350
},
{
"epoch": 8.48388671875,
"grad_norm": 2.616095542907715,
"learning_rate": 1e-05,
"loss": 0.3695,
"step": 17375
},
{
"epoch": 8.49609375,
"grad_norm": 3.389639377593994,
"learning_rate": 1e-05,
"loss": 0.4259,
"step": 17400
},
{
"epoch": 8.50830078125,
"grad_norm": 4.086320400238037,
"learning_rate": 1e-05,
"loss": 0.4047,
"step": 17425
},
{
"epoch": 8.5205078125,
"grad_norm": 2.4645962715148926,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 17450
},
{
"epoch": 8.53271484375,
"grad_norm": 2.2121071815490723,
"learning_rate": 1e-05,
"loss": 0.4381,
"step": 17475
},
{
"epoch": 8.544921875,
"grad_norm": 2.978116512298584,
"learning_rate": 1e-05,
"loss": 0.3699,
"step": 17500
},
{
"epoch": 8.544921875,
"eval_cer": 20.0,
"eval_loss": 0.382442444562912,
"eval_normalized_cer": 16.276202219482123,
"eval_runtime": 146.3607,
"eval_samples_per_second": 0.875,
"eval_steps_per_second": 0.055,
"step": 17500
},
{
"epoch": 8.55712890625,
"grad_norm": 3.6570897102355957,
"learning_rate": 1e-05,
"loss": 0.4495,
"step": 17525
},
{
"epoch": 8.5693359375,
"grad_norm": 3.3602068424224854,
"learning_rate": 1e-05,
"loss": 0.4132,
"step": 17550
},
{
"epoch": 8.58154296875,
"grad_norm": 3.4608097076416016,
"learning_rate": 1e-05,
"loss": 0.4641,
"step": 17575
},
{
"epoch": 8.59375,
"grad_norm": 3.122091293334961,
"learning_rate": 1e-05,
"loss": 0.4325,
"step": 17600
},
{
"epoch": 8.60595703125,
"grad_norm": 4.238564491271973,
"learning_rate": 1e-05,
"loss": 0.4002,
"step": 17625
},
{
"epoch": 8.6181640625,
"grad_norm": 3.2410008907318115,
"learning_rate": 1e-05,
"loss": 0.4678,
"step": 17650
},
{
"epoch": 8.63037109375,
"grad_norm": 2.8786799907684326,
"learning_rate": 1e-05,
"loss": 0.3947,
"step": 17675
},
{
"epoch": 8.642578125,
"grad_norm": 2.8009581565856934,
"learning_rate": 1e-05,
"loss": 0.4154,
"step": 17700
},
{
"epoch": 8.65478515625,
"grad_norm": 3.148653507232666,
"learning_rate": 1e-05,
"loss": 0.3856,
"step": 17725
},
{
"epoch": 8.6669921875,
"grad_norm": 3.803799867630005,
"learning_rate": 1e-05,
"loss": 0.4285,
"step": 17750
},
{
"epoch": 8.67919921875,
"grad_norm": 3.944755792617798,
"learning_rate": 1e-05,
"loss": 0.4598,
"step": 17775
},
{
"epoch": 8.69140625,
"grad_norm": 3.1085519790649414,
"learning_rate": 1e-05,
"loss": 0.4379,
"step": 17800
},
{
"epoch": 8.70361328125,
"grad_norm": 5.655707836151123,
"learning_rate": 1e-05,
"loss": 0.4121,
"step": 17825
},
{
"epoch": 8.7158203125,
"grad_norm": 2.179668664932251,
"learning_rate": 1e-05,
"loss": 0.4212,
"step": 17850
},
{
"epoch": 8.72802734375,
"grad_norm": 3.687593460083008,
"learning_rate": 1e-05,
"loss": 0.4643,
"step": 17875
},
{
"epoch": 8.740234375,
"grad_norm": 2.9461138248443604,
"learning_rate": 1e-05,
"loss": 0.4126,
"step": 17900
},
{
"epoch": 8.75244140625,
"grad_norm": 3.524925470352173,
"learning_rate": 1e-05,
"loss": 0.4407,
"step": 17925
},
{
"epoch": 8.7646484375,
"grad_norm": 4.609610080718994,
"learning_rate": 1e-05,
"loss": 0.42,
"step": 17950
},
{
"epoch": 8.77685546875,
"grad_norm": 4.5199079513549805,
"learning_rate": 1e-05,
"loss": 0.3921,
"step": 17975
},
{
"epoch": 8.7890625,
"grad_norm": 3.358597993850708,
"learning_rate": 1e-05,
"loss": 0.441,
"step": 18000
},
{
"epoch": 8.7890625,
"eval_cer": 21.854545454545455,
"eval_loss": 0.38204386830329895,
"eval_normalized_cer": 16.358405260994658,
"eval_runtime": 141.6466,
"eval_samples_per_second": 0.904,
"eval_steps_per_second": 0.056,
"step": 18000
},
{
"epoch": 8.80126953125,
"grad_norm": 3.380112648010254,
"learning_rate": 1e-05,
"loss": 0.3832,
"step": 18025
},
{
"epoch": 8.8134765625,
"grad_norm": 3.249871015548706,
"learning_rate": 1e-05,
"loss": 0.4449,
"step": 18050
},
{
"epoch": 8.82568359375,
"grad_norm": 3.3038084506988525,
"learning_rate": 1e-05,
"loss": 0.4919,
"step": 18075
},
{
"epoch": 8.837890625,
"grad_norm": 3.6768815517425537,
"learning_rate": 1e-05,
"loss": 0.42,
"step": 18100
},
{
"epoch": 8.85009765625,
"grad_norm": 2.7637171745300293,
"learning_rate": 1e-05,
"loss": 0.4275,
"step": 18125
},
{
"epoch": 8.8623046875,
"grad_norm": 2.896497964859009,
"learning_rate": 1e-05,
"loss": 0.3999,
"step": 18150
},
{
"epoch": 8.87451171875,
"grad_norm": 2.759514570236206,
"learning_rate": 1e-05,
"loss": 0.4459,
"step": 18175
},
{
"epoch": 8.88671875,
"grad_norm": 3.696629762649536,
"learning_rate": 1e-05,
"loss": 0.5002,
"step": 18200
},
{
"epoch": 8.89892578125,
"grad_norm": 2.6874115467071533,
"learning_rate": 1e-05,
"loss": 0.4143,
"step": 18225
},
{
"epoch": 8.9111328125,
"grad_norm": 3.066502809524536,
"learning_rate": 1e-05,
"loss": 0.3943,
"step": 18250
},
{
"epoch": 8.92333984375,
"grad_norm": 3.2830264568328857,
"learning_rate": 1e-05,
"loss": 0.3878,
"step": 18275
},
{
"epoch": 8.935546875,
"grad_norm": 3.5021724700927734,
"learning_rate": 1e-05,
"loss": 0.4332,
"step": 18300
},
{
"epoch": 8.94775390625,
"grad_norm": 4.337588310241699,
"learning_rate": 1e-05,
"loss": 0.4256,
"step": 18325
},
{
"epoch": 8.9599609375,
"grad_norm": 3.243098258972168,
"learning_rate": 1e-05,
"loss": 0.4593,
"step": 18350
},
{
"epoch": 8.97216796875,
"grad_norm": 2.650510787963867,
"learning_rate": 1e-05,
"loss": 0.3951,
"step": 18375
},
{
"epoch": 8.984375,
"grad_norm": 3.37617564201355,
"learning_rate": 1e-05,
"loss": 0.4143,
"step": 18400
},
{
"epoch": 8.99658203125,
"grad_norm": 3.7461116313934326,
"learning_rate": 1e-05,
"loss": 0.4147,
"step": 18425
},
{
"epoch": 9.0087890625,
"grad_norm": 4.165828704833984,
"learning_rate": 1e-05,
"loss": 0.4538,
"step": 18450
},
{
"epoch": 9.02099609375,
"grad_norm": 3.3768396377563477,
"learning_rate": 1e-05,
"loss": 0.4128,
"step": 18475
},
{
"epoch": 9.033203125,
"grad_norm": 3.006622314453125,
"learning_rate": 1e-05,
"loss": 0.427,
"step": 18500
},
{
"epoch": 9.033203125,
"eval_cer": 21.054545454545455,
"eval_loss": 0.3800523281097412,
"eval_normalized_cer": 16.85162351006987,
"eval_runtime": 92.9904,
"eval_samples_per_second": 1.376,
"eval_steps_per_second": 0.086,
"step": 18500
},
{
"epoch": 9.04541015625,
"grad_norm": 2.577570915222168,
"learning_rate": 1e-05,
"loss": 0.4233,
"step": 18525
},
{
"epoch": 9.0576171875,
"grad_norm": 3.2769463062286377,
"learning_rate": 1e-05,
"loss": 0.4209,
"step": 18550
},
{
"epoch": 9.06982421875,
"grad_norm": 3.296905040740967,
"learning_rate": 1e-05,
"loss": 0.4576,
"step": 18575
},
{
"epoch": 9.08203125,
"grad_norm": 2.849393367767334,
"learning_rate": 1e-05,
"loss": 0.4033,
"step": 18600
},
{
"epoch": 9.09423828125,
"grad_norm": 4.293519496917725,
"learning_rate": 1e-05,
"loss": 0.4604,
"step": 18625
},
{
"epoch": 9.1064453125,
"grad_norm": 3.5369253158569336,
"learning_rate": 1e-05,
"loss": 0.444,
"step": 18650
},
{
"epoch": 9.11865234375,
"grad_norm": 4.512628555297852,
"learning_rate": 1e-05,
"loss": 0.4008,
"step": 18675
},
{
"epoch": 9.130859375,
"grad_norm": 3.0092594623565674,
"learning_rate": 1e-05,
"loss": 0.4219,
"step": 18700
},
{
"epoch": 9.14306640625,
"grad_norm": 3.3797607421875,
"learning_rate": 1e-05,
"loss": 0.4102,
"step": 18725
},
{
"epoch": 9.1552734375,
"grad_norm": 3.1966211795806885,
"learning_rate": 1e-05,
"loss": 0.4152,
"step": 18750
},
{
"epoch": 9.16748046875,
"grad_norm": 3.016399383544922,
"learning_rate": 1e-05,
"loss": 0.4277,
"step": 18775
},
{
"epoch": 9.1796875,
"grad_norm": 3.313685417175293,
"learning_rate": 1e-05,
"loss": 0.3932,
"step": 18800
},
{
"epoch": 9.19189453125,
"grad_norm": 2.8252785205841064,
"learning_rate": 1e-05,
"loss": 0.4041,
"step": 18825
},
{
"epoch": 9.2041015625,
"grad_norm": 2.935676097869873,
"learning_rate": 1e-05,
"loss": 0.4107,
"step": 18850
},
{
"epoch": 9.21630859375,
"grad_norm": 2.280316114425659,
"learning_rate": 1e-05,
"loss": 0.4701,
"step": 18875
},
{
"epoch": 9.228515625,
"grad_norm": 2.718478202819824,
"learning_rate": 1e-05,
"loss": 0.4174,
"step": 18900
},
{
"epoch": 9.24072265625,
"grad_norm": 4.804378986358643,
"learning_rate": 1e-05,
"loss": 0.3947,
"step": 18925
},
{
"epoch": 9.2529296875,
"grad_norm": 4.070915222167969,
"learning_rate": 1e-05,
"loss": 0.4158,
"step": 18950
},
{
"epoch": 9.26513671875,
"grad_norm": 3.2516276836395264,
"learning_rate": 1e-05,
"loss": 0.4379,
"step": 18975
},
{
"epoch": 9.27734375,
"grad_norm": 3.5097761154174805,
"learning_rate": 1e-05,
"loss": 0.3767,
"step": 19000
},
{
"epoch": 9.27734375,
"eval_cer": 24.254545454545454,
"eval_loss": 0.36628666520118713,
"eval_normalized_cer": 18.701191944101932,
"eval_runtime": 93.7861,
"eval_samples_per_second": 1.365,
"eval_steps_per_second": 0.085,
"step": 19000
},
{
"epoch": 9.28955078125,
"grad_norm": 4.052979469299316,
"learning_rate": 1e-05,
"loss": 0.446,
"step": 19025
},
{
"epoch": 9.3017578125,
"grad_norm": 1.887149691581726,
"learning_rate": 1e-05,
"loss": 0.3933,
"step": 19050
},
{
"epoch": 9.31396484375,
"grad_norm": 3.2341785430908203,
"learning_rate": 1e-05,
"loss": 0.4361,
"step": 19075
},
{
"epoch": 9.326171875,
"grad_norm": 2.683950185775757,
"learning_rate": 1e-05,
"loss": 0.4107,
"step": 19100
},
{
"epoch": 9.33837890625,
"grad_norm": 3.6661105155944824,
"learning_rate": 1e-05,
"loss": 0.396,
"step": 19125
},
{
"epoch": 9.3505859375,
"grad_norm": 2.379519462585449,
"learning_rate": 1e-05,
"loss": 0.3875,
"step": 19150
},
{
"epoch": 9.36279296875,
"grad_norm": 2.952665090560913,
"learning_rate": 1e-05,
"loss": 0.4471,
"step": 19175
},
{
"epoch": 9.375,
"grad_norm": 3.1039767265319824,
"learning_rate": 1e-05,
"loss": 0.4052,
"step": 19200
},
{
"epoch": 9.38720703125,
"grad_norm": 3.941380023956299,
"learning_rate": 1e-05,
"loss": 0.3908,
"step": 19225
},
{
"epoch": 9.3994140625,
"grad_norm": 2.511928081512451,
"learning_rate": 1e-05,
"loss": 0.4215,
"step": 19250
},
{
"epoch": 9.41162109375,
"grad_norm": 2.6684021949768066,
"learning_rate": 1e-05,
"loss": 0.409,
"step": 19275
},
{
"epoch": 9.423828125,
"grad_norm": 3.9744958877563477,
"learning_rate": 1e-05,
"loss": 0.4778,
"step": 19300
},
{
"epoch": 9.43603515625,
"grad_norm": 2.601891279220581,
"learning_rate": 1e-05,
"loss": 0.4059,
"step": 19325
},
{
"epoch": 9.4482421875,
"grad_norm": 3.2740561962127686,
"learning_rate": 1e-05,
"loss": 0.4299,
"step": 19350
},
{
"epoch": 9.46044921875,
"grad_norm": 4.9015889167785645,
"learning_rate": 1e-05,
"loss": 0.4427,
"step": 19375
},
{
"epoch": 9.47265625,
"grad_norm": 2.882230281829834,
"learning_rate": 1e-05,
"loss": 0.3959,
"step": 19400
},
{
"epoch": 9.48486328125,
"grad_norm": 4.404541969299316,
"learning_rate": 1e-05,
"loss": 0.414,
"step": 19425
},
{
"epoch": 9.4970703125,
"grad_norm": 3.5113275051116943,
"learning_rate": 1e-05,
"loss": 0.3937,
"step": 19450
},
{
"epoch": 9.50927734375,
"grad_norm": 2.4990580081939697,
"learning_rate": 1e-05,
"loss": 0.4262,
"step": 19475
},
{
"epoch": 9.521484375,
"grad_norm": 3.3028454780578613,
"learning_rate": 1e-05,
"loss": 0.4519,
"step": 19500
},
{
"epoch": 9.521484375,
"eval_cer": 20.472727272727273,
"eval_loss": 0.37869974970817566,
"eval_normalized_cer": 15.454171804356761,
"eval_runtime": 97.3092,
"eval_samples_per_second": 1.315,
"eval_steps_per_second": 0.082,
"step": 19500
},
{
"epoch": 9.53369140625,
"grad_norm": 3.5164365768432617,
"learning_rate": 1e-05,
"loss": 0.4119,
"step": 19525
},
{
"epoch": 9.5458984375,
"grad_norm": 3.049210786819458,
"learning_rate": 1e-05,
"loss": 0.4198,
"step": 19550
},
{
"epoch": 9.55810546875,
"grad_norm": 2.659877300262451,
"learning_rate": 1e-05,
"loss": 0.4047,
"step": 19575
},
{
"epoch": 9.5703125,
"grad_norm": 2.617429733276367,
"learning_rate": 1e-05,
"loss": 0.446,
"step": 19600
},
{
"epoch": 9.58251953125,
"grad_norm": 3.912100076675415,
"learning_rate": 1e-05,
"loss": 0.4169,
"step": 19625
},
{
"epoch": 9.5947265625,
"grad_norm": 2.6492602825164795,
"learning_rate": 1e-05,
"loss": 0.4215,
"step": 19650
},
{
"epoch": 9.60693359375,
"grad_norm": 3.404269218444824,
"learning_rate": 1e-05,
"loss": 0.3782,
"step": 19675
},
{
"epoch": 9.619140625,
"grad_norm": 2.4062957763671875,
"learning_rate": 1e-05,
"loss": 0.4342,
"step": 19700
},
{
"epoch": 9.63134765625,
"grad_norm": 2.835569143295288,
"learning_rate": 1e-05,
"loss": 0.372,
"step": 19725
},
{
"epoch": 9.6435546875,
"grad_norm": 2.3654327392578125,
"learning_rate": 1e-05,
"loss": 0.4095,
"step": 19750
},
{
"epoch": 9.65576171875,
"grad_norm": 2.8574941158294678,
"learning_rate": 1e-05,
"loss": 0.4225,
"step": 19775
},
{
"epoch": 9.66796875,
"grad_norm": 3.122192859649658,
"learning_rate": 1e-05,
"loss": 0.4372,
"step": 19800
},
{
"epoch": 9.68017578125,
"grad_norm": 2.591271162033081,
"learning_rate": 1e-05,
"loss": 0.4523,
"step": 19825
},
{
"epoch": 9.6923828125,
"grad_norm": 3.0938827991485596,
"learning_rate": 1e-05,
"loss": 0.4055,
"step": 19850
},
{
"epoch": 9.70458984375,
"grad_norm": 3.643186569213867,
"learning_rate": 1e-05,
"loss": 0.4066,
"step": 19875
},
{
"epoch": 9.716796875,
"grad_norm": 3.9704155921936035,
"learning_rate": 1e-05,
"loss": 0.4615,
"step": 19900
},
{
"epoch": 9.72900390625,
"grad_norm": 2.1930134296417236,
"learning_rate": 1e-05,
"loss": 0.4453,
"step": 19925
},
{
"epoch": 9.7412109375,
"grad_norm": 3.3762011528015137,
"learning_rate": 1e-05,
"loss": 0.3818,
"step": 19950
},
{
"epoch": 9.75341796875,
"grad_norm": 3.172435998916626,
"learning_rate": 1e-05,
"loss": 0.4118,
"step": 19975
},
{
"epoch": 9.765625,
"grad_norm": 3.6243398189544678,
"learning_rate": 1e-05,
"loss": 0.4184,
"step": 20000
},
{
"epoch": 9.765625,
"eval_cer": 21.236363636363638,
"eval_loss": 0.37908729910850525,
"eval_normalized_cer": 16.85162351006987,
"eval_runtime": 97.9066,
"eval_samples_per_second": 1.307,
"eval_steps_per_second": 0.082,
"step": 20000
},
{
"epoch": 9.77783203125,
"grad_norm": 3.5656485557556152,
"learning_rate": 1e-05,
"loss": 0.4162,
"step": 20025
},
{
"epoch": 9.7900390625,
"grad_norm": 3.304313898086548,
"learning_rate": 1e-05,
"loss": 0.3687,
"step": 20050
},
{
"epoch": 9.80224609375,
"grad_norm": 2.4108333587646484,
"learning_rate": 1e-05,
"loss": 0.364,
"step": 20075
},
{
"epoch": 9.814453125,
"grad_norm": 3.621305465698242,
"learning_rate": 1e-05,
"loss": 0.4071,
"step": 20100
},
{
"epoch": 9.82666015625,
"grad_norm": 3.3533174991607666,
"learning_rate": 1e-05,
"loss": 0.4386,
"step": 20125
},
{
"epoch": 9.8388671875,
"grad_norm": 2.8671376705169678,
"learning_rate": 1e-05,
"loss": 0.4299,
"step": 20150
},
{
"epoch": 9.85107421875,
"grad_norm": 3.2498745918273926,
"learning_rate": 1e-05,
"loss": 0.4011,
"step": 20175
},
{
"epoch": 9.86328125,
"grad_norm": 3.473806381225586,
"learning_rate": 1e-05,
"loss": 0.441,
"step": 20200
},
{
"epoch": 9.87548828125,
"grad_norm": 2.80523419380188,
"learning_rate": 1e-05,
"loss": 0.4404,
"step": 20225
},
{
"epoch": 9.8876953125,
"grad_norm": 2.332698345184326,
"learning_rate": 1e-05,
"loss": 0.4269,
"step": 20250
},
{
"epoch": 9.89990234375,
"grad_norm": 2.8714382648468018,
"learning_rate": 1e-05,
"loss": 0.4263,
"step": 20275
},
{
"epoch": 9.912109375,
"grad_norm": 3.2583768367767334,
"learning_rate": 1e-05,
"loss": 0.4206,
"step": 20300
},
{
"epoch": 9.92431640625,
"grad_norm": 3.4748706817626953,
"learning_rate": 1e-05,
"loss": 0.4348,
"step": 20325
},
{
"epoch": 9.9365234375,
"grad_norm": 2.696352243423462,
"learning_rate": 1e-05,
"loss": 0.4237,
"step": 20350
},
{
"epoch": 9.94873046875,
"grad_norm": 3.5071768760681152,
"learning_rate": 1e-05,
"loss": 0.4521,
"step": 20375
},
{
"epoch": 9.9609375,
"grad_norm": 3.7230043411254883,
"learning_rate": 1e-05,
"loss": 0.4387,
"step": 20400
},
{
"epoch": 9.97314453125,
"grad_norm": 4.502689361572266,
"learning_rate": 1e-05,
"loss": 0.4303,
"step": 20425
},
{
"epoch": 9.9853515625,
"grad_norm": 2.849353790283203,
"learning_rate": 1e-05,
"loss": 0.3952,
"step": 20450
},
{
"epoch": 9.99755859375,
"grad_norm": 3.3098607063293457,
"learning_rate": 1e-05,
"loss": 0.3903,
"step": 20475
},
{
"epoch": 10.009765625,
"grad_norm": 2.970693349838257,
"learning_rate": 1e-05,
"loss": 0.3752,
"step": 20500
},
{
"epoch": 10.009765625,
"eval_cer": 24.581818181818182,
"eval_loss": 0.3705242872238159,
"eval_normalized_cer": 19.31771475544595,
"eval_runtime": 88.6677,
"eval_samples_per_second": 1.444,
"eval_steps_per_second": 0.09,
"step": 20500
},
{
"epoch": 10.02197265625,
"grad_norm": 2.900347948074341,
"learning_rate": 1e-05,
"loss": 0.4193,
"step": 20525
},
{
"epoch": 10.0341796875,
"grad_norm": 2.864896297454834,
"learning_rate": 1e-05,
"loss": 0.4096,
"step": 20550
},
{
"epoch": 10.04638671875,
"grad_norm": 2.738795518875122,
"learning_rate": 1e-05,
"loss": 0.4447,
"step": 20575
},
{
"epoch": 10.05859375,
"grad_norm": 2.6308414936065674,
"learning_rate": 1e-05,
"loss": 0.3794,
"step": 20600
},
{
"epoch": 10.07080078125,
"grad_norm": 2.4290077686309814,
"learning_rate": 1e-05,
"loss": 0.4009,
"step": 20625
},
{
"epoch": 10.0830078125,
"grad_norm": 2.612316846847534,
"learning_rate": 1e-05,
"loss": 0.3893,
"step": 20650
},
{
"epoch": 10.09521484375,
"grad_norm": 3.2632391452789307,
"learning_rate": 1e-05,
"loss": 0.3999,
"step": 20675
},
{
"epoch": 10.107421875,
"grad_norm": 2.5368306636810303,
"learning_rate": 1e-05,
"loss": 0.4016,
"step": 20700
},
{
"epoch": 10.11962890625,
"grad_norm": 3.5920157432556152,
"learning_rate": 1e-05,
"loss": 0.4465,
"step": 20725
},
{
"epoch": 10.1318359375,
"grad_norm": 2.3094773292541504,
"learning_rate": 1e-05,
"loss": 0.4074,
"step": 20750
},
{
"epoch": 10.14404296875,
"grad_norm": 2.4848082065582275,
"learning_rate": 1e-05,
"loss": 0.381,
"step": 20775
},
{
"epoch": 10.15625,
"grad_norm": 3.432950496673584,
"learning_rate": 1e-05,
"loss": 0.3846,
"step": 20800
},
{
"epoch": 10.16845703125,
"grad_norm": 3.441004514694214,
"learning_rate": 1e-05,
"loss": 0.4343,
"step": 20825
},
{
"epoch": 10.1806640625,
"grad_norm": 1.9125665426254272,
"learning_rate": 1e-05,
"loss": 0.3822,
"step": 20850
},
{
"epoch": 10.19287109375,
"grad_norm": 2.5689966678619385,
"learning_rate": 1e-05,
"loss": 0.4069,
"step": 20875
},
{
"epoch": 10.205078125,
"grad_norm": 2.4284005165100098,
"learning_rate": 1e-05,
"loss": 0.4481,
"step": 20900
},
{
"epoch": 10.21728515625,
"grad_norm": 2.9357855319976807,
"learning_rate": 1e-05,
"loss": 0.3985,
"step": 20925
},
{
"epoch": 10.2294921875,
"grad_norm": 3.1418330669403076,
"learning_rate": 1e-05,
"loss": 0.4334,
"step": 20950
},
{
"epoch": 10.24169921875,
"grad_norm": 3.525568723678589,
"learning_rate": 1e-05,
"loss": 0.4659,
"step": 20975
},
{
"epoch": 10.25390625,
"grad_norm": 3.033616304397583,
"learning_rate": 1e-05,
"loss": 0.4532,
"step": 21000
},
{
"epoch": 10.25390625,
"eval_cer": 22.363636363636363,
"eval_loss": 0.3776665925979614,
"eval_normalized_cer": 18.372379778051787,
"eval_runtime": 88.8963,
"eval_samples_per_second": 1.44,
"eval_steps_per_second": 0.09,
"step": 21000
},
{
"epoch": 10.26611328125,
"grad_norm": 3.0008301734924316,
"learning_rate": 1e-05,
"loss": 0.4229,
"step": 21025
},
{
"epoch": 10.2783203125,
"grad_norm": 4.1073102951049805,
"learning_rate": 1e-05,
"loss": 0.43,
"step": 21050
},
{
"epoch": 10.29052734375,
"grad_norm": 2.6684110164642334,
"learning_rate": 1e-05,
"loss": 0.4007,
"step": 21075
},
{
"epoch": 10.302734375,
"grad_norm": 2.416282892227173,
"learning_rate": 1e-05,
"loss": 0.4006,
"step": 21100
},
{
"epoch": 10.31494140625,
"grad_norm": 3.2998218536376953,
"learning_rate": 1e-05,
"loss": 0.4207,
"step": 21125
},
{
"epoch": 10.3271484375,
"grad_norm": 4.176929473876953,
"learning_rate": 1e-05,
"loss": 0.4559,
"step": 21150
},
{
"epoch": 10.33935546875,
"grad_norm": 2.522796154022217,
"learning_rate": 1e-05,
"loss": 0.4052,
"step": 21175
},
{
"epoch": 10.3515625,
"grad_norm": 2.9196386337280273,
"learning_rate": 1e-05,
"loss": 0.414,
"step": 21200
},
{
"epoch": 10.36376953125,
"grad_norm": 2.877315044403076,
"learning_rate": 1e-05,
"loss": 0.4454,
"step": 21225
},
{
"epoch": 10.3759765625,
"grad_norm": 3.0172317028045654,
"learning_rate": 1e-05,
"loss": 0.4294,
"step": 21250
},
{
"epoch": 10.38818359375,
"grad_norm": 3.5081894397735596,
"learning_rate": 1e-05,
"loss": 0.4283,
"step": 21275
},
{
"epoch": 10.400390625,
"grad_norm": 2.136151075363159,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 21300
},
{
"epoch": 10.41259765625,
"grad_norm": 3.0397658348083496,
"learning_rate": 1e-05,
"loss": 0.4019,
"step": 21325
},
{
"epoch": 10.4248046875,
"grad_norm": 3.9129014015197754,
"learning_rate": 1e-05,
"loss": 0.4128,
"step": 21350
},
{
"epoch": 10.43701171875,
"grad_norm": 3.1991934776306152,
"learning_rate": 1e-05,
"loss": 0.4,
"step": 21375
},
{
"epoch": 10.44921875,
"grad_norm": 2.850768804550171,
"learning_rate": 1e-05,
"loss": 0.4492,
"step": 21400
},
{
"epoch": 10.46142578125,
"grad_norm": 3.6122255325317383,
"learning_rate": 1e-05,
"loss": 0.3692,
"step": 21425
},
{
"epoch": 10.4736328125,
"grad_norm": 3.7408318519592285,
"learning_rate": 1e-05,
"loss": 0.4239,
"step": 21450
},
{
"epoch": 10.48583984375,
"grad_norm": 2.4114573001861572,
"learning_rate": 1e-05,
"loss": 0.3595,
"step": 21475
},
{
"epoch": 10.498046875,
"grad_norm": 3.060419797897339,
"learning_rate": 1e-05,
"loss": 0.3665,
"step": 21500
},
{
"epoch": 10.498046875,
"eval_cer": 20.945454545454545,
"eval_loss": 0.3751263916492462,
"eval_normalized_cer": 17.13933415536375,
"eval_runtime": 91.0253,
"eval_samples_per_second": 1.406,
"eval_steps_per_second": 0.088,
"step": 21500
},
{
"epoch": 10.51025390625,
"grad_norm": 2.921161413192749,
"learning_rate": 1e-05,
"loss": 0.4385,
"step": 21525
},
{
"epoch": 10.5224609375,
"grad_norm": 3.127946376800537,
"learning_rate": 1e-05,
"loss": 0.4185,
"step": 21550
},
{
"epoch": 10.53466796875,
"grad_norm": 2.4071595668792725,
"learning_rate": 1e-05,
"loss": 0.4172,
"step": 21575
},
{
"epoch": 10.546875,
"grad_norm": 3.3871352672576904,
"learning_rate": 1e-05,
"loss": 0.3781,
"step": 21600
},
{
"epoch": 10.55908203125,
"grad_norm": 2.465686082839966,
"learning_rate": 1e-05,
"loss": 0.4021,
"step": 21625
},
{
"epoch": 10.5712890625,
"grad_norm": 3.5105502605438232,
"learning_rate": 1e-05,
"loss": 0.3874,
"step": 21650
},
{
"epoch": 10.58349609375,
"grad_norm": 3.2150614261627197,
"learning_rate": 1e-05,
"loss": 0.4203,
"step": 21675
},
{
"epoch": 10.595703125,
"grad_norm": 2.755958080291748,
"learning_rate": 1e-05,
"loss": 0.3961,
"step": 21700
},
{
"epoch": 10.60791015625,
"grad_norm": 2.6068344116210938,
"learning_rate": 1e-05,
"loss": 0.425,
"step": 21725
},
{
"epoch": 10.6201171875,
"grad_norm": 4.860282897949219,
"learning_rate": 1e-05,
"loss": 0.4188,
"step": 21750
},
{
"epoch": 10.63232421875,
"grad_norm": 3.154463529586792,
"learning_rate": 1e-05,
"loss": 0.4108,
"step": 21775
},
{
"epoch": 10.64453125,
"grad_norm": 3.5040194988250732,
"learning_rate": 1e-05,
"loss": 0.4295,
"step": 21800
},
{
"epoch": 10.65673828125,
"grad_norm": 3.5046403408050537,
"learning_rate": 1e-05,
"loss": 0.4306,
"step": 21825
},
{
"epoch": 10.6689453125,
"grad_norm": 3.5825250148773193,
"learning_rate": 1e-05,
"loss": 0.3991,
"step": 21850
},
{
"epoch": 10.68115234375,
"grad_norm": 3.1898937225341797,
"learning_rate": 1e-05,
"loss": 0.4365,
"step": 21875
},
{
"epoch": 10.693359375,
"grad_norm": 2.6460320949554443,
"learning_rate": 1e-05,
"loss": 0.4053,
"step": 21900
},
{
"epoch": 10.70556640625,
"grad_norm": 2.7370963096618652,
"learning_rate": 1e-05,
"loss": 0.3961,
"step": 21925
},
{
"epoch": 10.7177734375,
"grad_norm": 2.502699851989746,
"learning_rate": 1e-05,
"loss": 0.3943,
"step": 21950
},
{
"epoch": 10.72998046875,
"grad_norm": 3.806084632873535,
"learning_rate": 1e-05,
"loss": 0.424,
"step": 21975
},
{
"epoch": 10.7421875,
"grad_norm": 2.743624687194824,
"learning_rate": 1e-05,
"loss": 0.3897,
"step": 22000
},
{
"epoch": 10.7421875,
"eval_cer": 22.763636363636365,
"eval_loss": 0.3785582482814789,
"eval_normalized_cer": 18.53678586107686,
"eval_runtime": 89.903,
"eval_samples_per_second": 1.424,
"eval_steps_per_second": 0.089,
"step": 22000
},
{
"epoch": 10.75439453125,
"grad_norm": 3.1976799964904785,
"learning_rate": 1e-05,
"loss": 0.439,
"step": 22025
},
{
"epoch": 10.7666015625,
"grad_norm": 2.4848337173461914,
"learning_rate": 1e-05,
"loss": 0.4722,
"step": 22050
},
{
"epoch": 10.77880859375,
"grad_norm": 2.554133176803589,
"learning_rate": 1e-05,
"loss": 0.4607,
"step": 22075
},
{
"epoch": 10.791015625,
"grad_norm": 3.2724106311798096,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 22100
},
{
"epoch": 10.80322265625,
"grad_norm": 2.920471668243408,
"learning_rate": 1e-05,
"loss": 0.3968,
"step": 22125
},
{
"epoch": 10.8154296875,
"grad_norm": 3.323936700820923,
"learning_rate": 1e-05,
"loss": 0.4126,
"step": 22150
},
{
"epoch": 10.82763671875,
"grad_norm": 3.5896575450897217,
"learning_rate": 1e-05,
"loss": 0.3986,
"step": 22175
},
{
"epoch": 10.83984375,
"grad_norm": 3.3386435508728027,
"learning_rate": 1e-05,
"loss": 0.3979,
"step": 22200
},
{
"epoch": 10.85205078125,
"grad_norm": 3.4217638969421387,
"learning_rate": 1e-05,
"loss": 0.3928,
"step": 22225
},
{
"epoch": 10.8642578125,
"grad_norm": 2.90092134475708,
"learning_rate": 1e-05,
"loss": 0.3813,
"step": 22250
},
{
"epoch": 10.87646484375,
"grad_norm": 2.9017395973205566,
"learning_rate": 1e-05,
"loss": 0.4061,
"step": 22275
},
{
"epoch": 10.888671875,
"grad_norm": 3.1124579906463623,
"learning_rate": 1e-05,
"loss": 0.3933,
"step": 22300
},
{
"epoch": 10.90087890625,
"grad_norm": 2.763261079788208,
"learning_rate": 1e-05,
"loss": 0.3751,
"step": 22325
},
{
"epoch": 10.9130859375,
"grad_norm": 3.8521738052368164,
"learning_rate": 1e-05,
"loss": 0.4537,
"step": 22350
},
{
"epoch": 10.92529296875,
"grad_norm": 3.3494768142700195,
"learning_rate": 1e-05,
"loss": 0.3479,
"step": 22375
},
{
"epoch": 10.9375,
"grad_norm": 3.752168655395508,
"learning_rate": 1e-05,
"loss": 0.4393,
"step": 22400
},
{
"epoch": 10.94970703125,
"grad_norm": 2.980929374694824,
"learning_rate": 1e-05,
"loss": 0.4376,
"step": 22425
},
{
"epoch": 10.9619140625,
"grad_norm": 2.931426525115967,
"learning_rate": 1e-05,
"loss": 0.4006,
"step": 22450
},
{
"epoch": 10.97412109375,
"grad_norm": 2.798682451248169,
"learning_rate": 1e-05,
"loss": 0.3977,
"step": 22475
},
{
"epoch": 10.986328125,
"grad_norm": 3.374363422393799,
"learning_rate": 1e-05,
"loss": 0.3846,
"step": 22500
},
{
"epoch": 10.986328125,
"eval_cer": 21.12727272727273,
"eval_loss": 0.3707554340362549,
"eval_normalized_cer": 16.93382655158241,
"eval_runtime": 90.713,
"eval_samples_per_second": 1.411,
"eval_steps_per_second": 0.088,
"step": 22500
},
{
"epoch": 10.99853515625,
"grad_norm": 2.9657630920410156,
"learning_rate": 1e-05,
"loss": 0.4397,
"step": 22525
},
{
"epoch": 11.0107421875,
"grad_norm": 3.373459577560425,
"learning_rate": 1e-05,
"loss": 0.4813,
"step": 22550
},
{
"epoch": 11.02294921875,
"grad_norm": 2.293248176574707,
"learning_rate": 1e-05,
"loss": 0.3939,
"step": 22575
},
{
"epoch": 11.03515625,
"grad_norm": 3.1755404472351074,
"learning_rate": 1e-05,
"loss": 0.4173,
"step": 22600
},
{
"epoch": 11.04736328125,
"grad_norm": 4.223100185394287,
"learning_rate": 1e-05,
"loss": 0.3965,
"step": 22625
},
{
"epoch": 11.0595703125,
"grad_norm": 4.098690986633301,
"learning_rate": 1e-05,
"loss": 0.4371,
"step": 22650
},
{
"epoch": 11.07177734375,
"grad_norm": 2.937412977218628,
"learning_rate": 1e-05,
"loss": 0.4287,
"step": 22675
},
{
"epoch": 11.083984375,
"grad_norm": 2.240269422531128,
"learning_rate": 1e-05,
"loss": 0.4255,
"step": 22700
},
{
"epoch": 11.09619140625,
"grad_norm": 3.1162662506103516,
"learning_rate": 1e-05,
"loss": 0.4157,
"step": 22725
},
{
"epoch": 11.1083984375,
"grad_norm": 2.875746726989746,
"learning_rate": 1e-05,
"loss": 0.3941,
"step": 22750
},
{
"epoch": 11.12060546875,
"grad_norm": 2.4718353748321533,
"learning_rate": 1e-05,
"loss": 0.3975,
"step": 22775
},
{
"epoch": 11.1328125,
"grad_norm": 3.1226015090942383,
"learning_rate": 1e-05,
"loss": 0.4033,
"step": 22800
},
{
"epoch": 11.14501953125,
"grad_norm": 3.384474515914917,
"learning_rate": 1e-05,
"loss": 0.4701,
"step": 22825
},
{
"epoch": 11.1572265625,
"grad_norm": 3.434800148010254,
"learning_rate": 1e-05,
"loss": 0.404,
"step": 22850
},
{
"epoch": 11.16943359375,
"grad_norm": 2.6356847286224365,
"learning_rate": 1e-05,
"loss": 0.408,
"step": 22875
},
{
"epoch": 11.181640625,
"grad_norm": 2.8493409156799316,
"learning_rate": 1e-05,
"loss": 0.3589,
"step": 22900
},
{
"epoch": 11.19384765625,
"grad_norm": 3.3821358680725098,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 22925
},
{
"epoch": 11.2060546875,
"grad_norm": 2.6961047649383545,
"learning_rate": 1e-05,
"loss": 0.3989,
"step": 22950
},
{
"epoch": 11.21826171875,
"grad_norm": 2.9927561283111572,
"learning_rate": 1e-05,
"loss": 0.4523,
"step": 22975
},
{
"epoch": 11.23046875,
"grad_norm": 3.8990068435668945,
"learning_rate": 1e-05,
"loss": 0.4638,
"step": 23000
},
{
"epoch": 11.23046875,
"eval_cer": 20.545454545454543,
"eval_loss": 0.3684229254722595,
"eval_normalized_cer": 16.974928072338678,
"eval_runtime": 89.0848,
"eval_samples_per_second": 1.437,
"eval_steps_per_second": 0.09,
"step": 23000
},
{
"epoch": 11.24267578125,
"grad_norm": 3.1418068408966064,
"learning_rate": 1e-05,
"loss": 0.3857,
"step": 23025
},
{
"epoch": 11.2548828125,
"grad_norm": 3.578172445297241,
"learning_rate": 1e-05,
"loss": 0.4817,
"step": 23050
},
{
"epoch": 11.26708984375,
"grad_norm": 3.6145339012145996,
"learning_rate": 1e-05,
"loss": 0.3732,
"step": 23075
},
{
"epoch": 11.279296875,
"grad_norm": 3.0317516326904297,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 23100
},
{
"epoch": 11.29150390625,
"grad_norm": 3.6998438835144043,
"learning_rate": 1e-05,
"loss": 0.4374,
"step": 23125
},
{
"epoch": 11.3037109375,
"grad_norm": 3.1519877910614014,
"learning_rate": 1e-05,
"loss": 0.4285,
"step": 23150
},
{
"epoch": 11.31591796875,
"grad_norm": 2.30660343170166,
"learning_rate": 1e-05,
"loss": 0.4427,
"step": 23175
},
{
"epoch": 11.328125,
"grad_norm": 2.7679004669189453,
"learning_rate": 1e-05,
"loss": 0.4266,
"step": 23200
},
{
"epoch": 11.34033203125,
"grad_norm": 4.154956340789795,
"learning_rate": 1e-05,
"loss": 0.3949,
"step": 23225
},
{
"epoch": 11.3525390625,
"grad_norm": 3.537022113800049,
"learning_rate": 1e-05,
"loss": 0.4484,
"step": 23250
},
{
"epoch": 11.36474609375,
"grad_norm": 3.042243242263794,
"learning_rate": 1e-05,
"loss": 0.4401,
"step": 23275
},
{
"epoch": 11.376953125,
"grad_norm": 3.82000994682312,
"learning_rate": 1e-05,
"loss": 0.3968,
"step": 23300
},
{
"epoch": 11.38916015625,
"grad_norm": 2.052567958831787,
"learning_rate": 1e-05,
"loss": 0.3856,
"step": 23325
},
{
"epoch": 11.4013671875,
"grad_norm": 3.497328758239746,
"learning_rate": 1e-05,
"loss": 0.3915,
"step": 23350
},
{
"epoch": 11.41357421875,
"grad_norm": 3.023749589920044,
"learning_rate": 1e-05,
"loss": 0.4381,
"step": 23375
},
{
"epoch": 11.42578125,
"grad_norm": 3.8763065338134766,
"learning_rate": 1e-05,
"loss": 0.3879,
"step": 23400
},
{
"epoch": 11.43798828125,
"grad_norm": 2.346403121948242,
"learning_rate": 1e-05,
"loss": 0.4255,
"step": 23425
},
{
"epoch": 11.4501953125,
"grad_norm": 2.7521772384643555,
"learning_rate": 1e-05,
"loss": 0.4073,
"step": 23450
},
{
"epoch": 11.46240234375,
"grad_norm": 2.2675764560699463,
"learning_rate": 1e-05,
"loss": 0.3837,
"step": 23475
},
{
"epoch": 11.474609375,
"grad_norm": 3.2309587001800537,
"learning_rate": 1e-05,
"loss": 0.4423,
"step": 23500
},
{
"epoch": 11.474609375,
"eval_cer": 21.454545454545453,
"eval_loss": 0.3692309558391571,
"eval_normalized_cer": 16.892725030826142,
"eval_runtime": 91.354,
"eval_samples_per_second": 1.401,
"eval_steps_per_second": 0.088,
"step": 23500
},
{
"epoch": 11.48681640625,
"grad_norm": 3.551201581954956,
"learning_rate": 1e-05,
"loss": 0.3808,
"step": 23525
},
{
"epoch": 11.4990234375,
"grad_norm": 3.828253984451294,
"learning_rate": 1e-05,
"loss": 0.4081,
"step": 23550
},
{
"epoch": 11.51123046875,
"grad_norm": 3.3823349475860596,
"learning_rate": 1e-05,
"loss": 0.3875,
"step": 23575
},
{
"epoch": 11.5234375,
"grad_norm": 3.332829713821411,
"learning_rate": 1e-05,
"loss": 0.4167,
"step": 23600
},
{
"epoch": 11.53564453125,
"grad_norm": 2.943939685821533,
"learning_rate": 1e-05,
"loss": 0.4397,
"step": 23625
},
{
"epoch": 11.5478515625,
"grad_norm": 3.0470168590545654,
"learning_rate": 1e-05,
"loss": 0.4395,
"step": 23650
},
{
"epoch": 11.56005859375,
"grad_norm": 3.1324830055236816,
"learning_rate": 1e-05,
"loss": 0.391,
"step": 23675
},
{
"epoch": 11.572265625,
"grad_norm": 2.959723711013794,
"learning_rate": 1e-05,
"loss": 0.4063,
"step": 23700
},
{
"epoch": 11.58447265625,
"grad_norm": 3.0424516201019287,
"learning_rate": 1e-05,
"loss": 0.457,
"step": 23725
},
{
"epoch": 11.5966796875,
"grad_norm": 2.7524912357330322,
"learning_rate": 1e-05,
"loss": 0.3916,
"step": 23750
},
{
"epoch": 11.60888671875,
"grad_norm": 3.8842151165008545,
"learning_rate": 1e-05,
"loss": 0.4134,
"step": 23775
},
{
"epoch": 11.62109375,
"grad_norm": 2.5505778789520264,
"learning_rate": 1e-05,
"loss": 0.4301,
"step": 23800
},
{
"epoch": 11.63330078125,
"grad_norm": 2.3430569171905518,
"learning_rate": 1e-05,
"loss": 0.3913,
"step": 23825
},
{
"epoch": 11.6455078125,
"grad_norm": 3.5037317276000977,
"learning_rate": 1e-05,
"loss": 0.4213,
"step": 23850
},
{
"epoch": 11.65771484375,
"grad_norm": 3.067307710647583,
"learning_rate": 1e-05,
"loss": 0.395,
"step": 23875
},
{
"epoch": 11.669921875,
"grad_norm": 3.3112151622772217,
"learning_rate": 1e-05,
"loss": 0.429,
"step": 23900
},
{
"epoch": 11.68212890625,
"grad_norm": 3.318018913269043,
"learning_rate": 1e-05,
"loss": 0.4593,
"step": 23925
},
{
"epoch": 11.6943359375,
"grad_norm": 2.73763370513916,
"learning_rate": 1e-05,
"loss": 0.3565,
"step": 23950
},
{
"epoch": 11.70654296875,
"grad_norm": 4.161790370941162,
"learning_rate": 1e-05,
"loss": 0.4181,
"step": 23975
},
{
"epoch": 11.71875,
"grad_norm": 2.161367893218994,
"learning_rate": 1e-05,
"loss": 0.3991,
"step": 24000
},
{
"epoch": 11.71875,
"eval_cer": 20.763636363636365,
"eval_loss": 0.37048545479774475,
"eval_normalized_cer": 16.769420468557335,
"eval_runtime": 97.2817,
"eval_samples_per_second": 1.316,
"eval_steps_per_second": 0.082,
"step": 24000
},
{
"epoch": 11.73095703125,
"grad_norm": 2.165503978729248,
"learning_rate": 1e-05,
"loss": 0.3863,
"step": 24025
},
{
"epoch": 11.7431640625,
"grad_norm": 3.243612289428711,
"learning_rate": 1e-05,
"loss": 0.4415,
"step": 24050
},
{
"epoch": 11.75537109375,
"grad_norm": 3.0755250453948975,
"learning_rate": 1e-05,
"loss": 0.4233,
"step": 24075
},
{
"epoch": 11.767578125,
"grad_norm": 3.3624966144561768,
"learning_rate": 1e-05,
"loss": 0.3975,
"step": 24100
},
{
"epoch": 11.77978515625,
"grad_norm": 3.3429675102233887,
"learning_rate": 1e-05,
"loss": 0.4349,
"step": 24125
},
{
"epoch": 11.7919921875,
"grad_norm": 2.686250925064087,
"learning_rate": 1e-05,
"loss": 0.3938,
"step": 24150
},
{
"epoch": 11.80419921875,
"grad_norm": 2.747789144515991,
"learning_rate": 1e-05,
"loss": 0.4164,
"step": 24175
},
{
"epoch": 11.81640625,
"grad_norm": 3.79379940032959,
"learning_rate": 1e-05,
"loss": 0.4195,
"step": 24200
},
{
"epoch": 11.82861328125,
"grad_norm": 3.6123952865600586,
"learning_rate": 1e-05,
"loss": 0.399,
"step": 24225
},
{
"epoch": 11.8408203125,
"grad_norm": 3.1555302143096924,
"learning_rate": 1e-05,
"loss": 0.4238,
"step": 24250
},
{
"epoch": 11.85302734375,
"grad_norm": 2.9324278831481934,
"learning_rate": 1e-05,
"loss": 0.4096,
"step": 24275
},
{
"epoch": 11.865234375,
"grad_norm": 2.907283067703247,
"learning_rate": 1e-05,
"loss": 0.4128,
"step": 24300
},
{
"epoch": 11.87744140625,
"grad_norm": 3.6261141300201416,
"learning_rate": 1e-05,
"loss": 0.4398,
"step": 24325
},
{
"epoch": 11.8896484375,
"grad_norm": 4.799058437347412,
"learning_rate": 1e-05,
"loss": 0.4489,
"step": 24350
},
{
"epoch": 11.90185546875,
"grad_norm": 2.681626558303833,
"learning_rate": 1e-05,
"loss": 0.4308,
"step": 24375
},
{
"epoch": 11.9140625,
"grad_norm": 2.723325729370117,
"learning_rate": 1e-05,
"loss": 0.3909,
"step": 24400
},
{
"epoch": 11.92626953125,
"grad_norm": 4.047337055206299,
"learning_rate": 1e-05,
"loss": 0.3749,
"step": 24425
},
{
"epoch": 11.9384765625,
"grad_norm": 3.933332920074463,
"learning_rate": 1e-05,
"loss": 0.4232,
"step": 24450
},
{
"epoch": 11.95068359375,
"grad_norm": 2.8435702323913574,
"learning_rate": 1e-05,
"loss": 0.4071,
"step": 24475
},
{
"epoch": 11.962890625,
"grad_norm": 3.180521249771118,
"learning_rate": 1e-05,
"loss": 0.4023,
"step": 24500
},
{
"epoch": 11.962890625,
"eval_cer": 18.436363636363637,
"eval_loss": 0.36682504415512085,
"eval_normalized_cer": 14.26222770242499,
"eval_runtime": 89.1047,
"eval_samples_per_second": 1.437,
"eval_steps_per_second": 0.09,
"step": 24500
},
{
"epoch": 11.97509765625,
"grad_norm": 2.902740716934204,
"learning_rate": 1e-05,
"loss": 0.4535,
"step": 24525
},
{
"epoch": 11.9873046875,
"grad_norm": 2.6517724990844727,
"learning_rate": 1e-05,
"loss": 0.4093,
"step": 24550
},
{
"epoch": 11.99951171875,
"grad_norm": 3.2232935428619385,
"learning_rate": 1e-05,
"loss": 0.4295,
"step": 24575
},
{
"epoch": 12.01171875,
"grad_norm": 1.8525205850601196,
"learning_rate": 1e-05,
"loss": 0.4109,
"step": 24600
},
{
"epoch": 12.02392578125,
"grad_norm": 2.505067825317383,
"learning_rate": 1e-05,
"loss": 0.3659,
"step": 24625
},
{
"epoch": 12.0361328125,
"grad_norm": 2.921861410140991,
"learning_rate": 1e-05,
"loss": 0.387,
"step": 24650
},
{
"epoch": 12.04833984375,
"grad_norm": 3.4830503463745117,
"learning_rate": 1e-05,
"loss": 0.4036,
"step": 24675
},
{
"epoch": 12.060546875,
"grad_norm": 3.166236400604248,
"learning_rate": 1e-05,
"loss": 0.4014,
"step": 24700
},
{
"epoch": 12.07275390625,
"grad_norm": 2.9270448684692383,
"learning_rate": 1e-05,
"loss": 0.3983,
"step": 24725
},
{
"epoch": 12.0849609375,
"grad_norm": 3.857215166091919,
"learning_rate": 1e-05,
"loss": 0.4343,
"step": 24750
},
{
"epoch": 12.09716796875,
"grad_norm": 3.039315700531006,
"learning_rate": 1e-05,
"loss": 0.4095,
"step": 24775
},
{
"epoch": 12.109375,
"grad_norm": 4.983880043029785,
"learning_rate": 1e-05,
"loss": 0.4108,
"step": 24800
},
{
"epoch": 12.12158203125,
"grad_norm": 3.1066694259643555,
"learning_rate": 1e-05,
"loss": 0.421,
"step": 24825
},
{
"epoch": 12.1337890625,
"grad_norm": 3.10778546333313,
"learning_rate": 1e-05,
"loss": 0.4243,
"step": 24850
},
{
"epoch": 12.14599609375,
"grad_norm": 2.57086443901062,
"learning_rate": 1e-05,
"loss": 0.4049,
"step": 24875
},
{
"epoch": 12.158203125,
"grad_norm": 2.7861294746398926,
"learning_rate": 1e-05,
"loss": 0.3917,
"step": 24900
},
{
"epoch": 12.17041015625,
"grad_norm": 3.35518217086792,
"learning_rate": 1e-05,
"loss": 0.4332,
"step": 24925
},
{
"epoch": 12.1826171875,
"grad_norm": 2.97426176071167,
"learning_rate": 1e-05,
"loss": 0.4148,
"step": 24950
},
{
"epoch": 12.19482421875,
"grad_norm": 2.733003854751587,
"learning_rate": 1e-05,
"loss": 0.3901,
"step": 24975
},
{
"epoch": 12.20703125,
"grad_norm": 2.4438769817352295,
"learning_rate": 1e-05,
"loss": 0.4039,
"step": 25000
},
{
"epoch": 12.20703125,
"eval_cer": 20.763636363636365,
"eval_loss": 0.3740121126174927,
"eval_normalized_cer": 15.454171804356761,
"eval_runtime": 88.8688,
"eval_samples_per_second": 1.44,
"eval_steps_per_second": 0.09,
"step": 25000
},
{
"epoch": 12.21923828125,
"grad_norm": 2.897085666656494,
"learning_rate": 1e-05,
"loss": 0.4155,
"step": 25025
},
{
"epoch": 12.2314453125,
"grad_norm": 3.21028470993042,
"learning_rate": 1e-05,
"loss": 0.4249,
"step": 25050
},
{
"epoch": 12.24365234375,
"grad_norm": 2.8493075370788574,
"learning_rate": 1e-05,
"loss": 0.4279,
"step": 25075
},
{
"epoch": 12.255859375,
"grad_norm": 3.8995752334594727,
"learning_rate": 1e-05,
"loss": 0.4169,
"step": 25100
},
{
"epoch": 12.26806640625,
"grad_norm": 3.4920387268066406,
"learning_rate": 1e-05,
"loss": 0.4164,
"step": 25125
},
{
"epoch": 12.2802734375,
"grad_norm": 3.276472330093384,
"learning_rate": 1e-05,
"loss": 0.419,
"step": 25150
},
{
"epoch": 12.29248046875,
"grad_norm": 2.8615663051605225,
"learning_rate": 1e-05,
"loss": 0.3974,
"step": 25175
},
{
"epoch": 12.3046875,
"grad_norm": 4.341454029083252,
"learning_rate": 1e-05,
"loss": 0.4033,
"step": 25200
},
{
"epoch": 12.31689453125,
"grad_norm": 4.287600517272949,
"learning_rate": 1e-05,
"loss": 0.4316,
"step": 25225
},
{
"epoch": 12.3291015625,
"grad_norm": 2.183636426925659,
"learning_rate": 1e-05,
"loss": 0.4461,
"step": 25250
},
{
"epoch": 12.34130859375,
"grad_norm": 2.487642765045166,
"learning_rate": 1e-05,
"loss": 0.413,
"step": 25275
},
{
"epoch": 12.353515625,
"grad_norm": 2.7473361492156982,
"learning_rate": 1e-05,
"loss": 0.445,
"step": 25300
},
{
"epoch": 12.36572265625,
"grad_norm": 3.353499174118042,
"learning_rate": 1e-05,
"loss": 0.4473,
"step": 25325
},
{
"epoch": 12.3779296875,
"grad_norm": 2.5522449016571045,
"learning_rate": 1e-05,
"loss": 0.41,
"step": 25350
},
{
"epoch": 12.39013671875,
"grad_norm": 4.818186283111572,
"learning_rate": 1e-05,
"loss": 0.445,
"step": 25375
},
{
"epoch": 12.40234375,
"grad_norm": 4.093599796295166,
"learning_rate": 1e-05,
"loss": 0.4741,
"step": 25400
},
{
"epoch": 12.41455078125,
"grad_norm": 3.5378177165985107,
"learning_rate": 1e-05,
"loss": 0.4002,
"step": 25425
},
{
"epoch": 12.4267578125,
"grad_norm": 2.7892191410064697,
"learning_rate": 1e-05,
"loss": 0.3966,
"step": 25450
},
{
"epoch": 12.43896484375,
"grad_norm": 4.0514044761657715,
"learning_rate": 1e-05,
"loss": 0.4497,
"step": 25475
},
{
"epoch": 12.451171875,
"grad_norm": 2.708777904510498,
"learning_rate": 1e-05,
"loss": 0.4542,
"step": 25500
},
{
"epoch": 12.451171875,
"eval_cer": 21.78181818181818,
"eval_loss": 0.3693616986274719,
"eval_normalized_cer": 17.879161528976574,
"eval_runtime": 91.1491,
"eval_samples_per_second": 1.404,
"eval_steps_per_second": 0.088,
"step": 25500
},
{
"epoch": 12.46337890625,
"grad_norm": 2.597757577896118,
"learning_rate": 1e-05,
"loss": 0.398,
"step": 25525
},
{
"epoch": 12.4755859375,
"grad_norm": 4.003383159637451,
"learning_rate": 1e-05,
"loss": 0.3819,
"step": 25550
},
{
"epoch": 12.48779296875,
"grad_norm": 2.98598575592041,
"learning_rate": 1e-05,
"loss": 0.3604,
"step": 25575
},
{
"epoch": 12.5,
"grad_norm": 2.1123640537261963,
"learning_rate": 1e-05,
"loss": 0.4159,
"step": 25600
},
{
"epoch": 12.51220703125,
"grad_norm": 3.579258441925049,
"learning_rate": 1e-05,
"loss": 0.4355,
"step": 25625
},
{
"epoch": 12.5244140625,
"grad_norm": 3.140286922454834,
"learning_rate": 1e-05,
"loss": 0.3937,
"step": 25650
},
{
"epoch": 12.53662109375,
"grad_norm": 2.7550101280212402,
"learning_rate": 1e-05,
"loss": 0.4045,
"step": 25675
},
{
"epoch": 12.548828125,
"grad_norm": 2.977573871612549,
"learning_rate": 1e-05,
"loss": 0.4117,
"step": 25700
},
{
"epoch": 12.56103515625,
"grad_norm": 2.6313016414642334,
"learning_rate": 1e-05,
"loss": 0.3912,
"step": 25725
},
{
"epoch": 12.5732421875,
"grad_norm": 1.7519272565841675,
"learning_rate": 1e-05,
"loss": 0.4081,
"step": 25750
},
{
"epoch": 12.58544921875,
"grad_norm": 2.9577393531799316,
"learning_rate": 1e-05,
"loss": 0.4349,
"step": 25775
},
{
"epoch": 12.59765625,
"grad_norm": 3.945467233657837,
"learning_rate": 1e-05,
"loss": 0.403,
"step": 25800
},
{
"epoch": 12.60986328125,
"grad_norm": 3.028316020965576,
"learning_rate": 1e-05,
"loss": 0.4188,
"step": 25825
},
{
"epoch": 12.6220703125,
"grad_norm": 2.839320421218872,
"learning_rate": 1e-05,
"loss": 0.4018,
"step": 25850
},
{
"epoch": 12.63427734375,
"grad_norm": 2.295088291168213,
"learning_rate": 1e-05,
"loss": 0.35,
"step": 25875
},
{
"epoch": 12.646484375,
"grad_norm": 2.7483038902282715,
"learning_rate": 1e-05,
"loss": 0.4129,
"step": 25900
},
{
"epoch": 12.65869140625,
"grad_norm": 3.1008427143096924,
"learning_rate": 1e-05,
"loss": 0.3926,
"step": 25925
},
{
"epoch": 12.6708984375,
"grad_norm": 2.92093825340271,
"learning_rate": 1e-05,
"loss": 0.3813,
"step": 25950
},
{
"epoch": 12.68310546875,
"grad_norm": 2.930253267288208,
"learning_rate": 1e-05,
"loss": 0.3903,
"step": 25975
},
{
"epoch": 12.6953125,
"grad_norm": 2.860041379928589,
"learning_rate": 1e-05,
"loss": 0.4042,
"step": 26000
},
{
"epoch": 12.6953125,
"eval_cer": 22.10909090909091,
"eval_loss": 0.3708876967430115,
"eval_normalized_cer": 17.79695848746404,
"eval_runtime": 90.9962,
"eval_samples_per_second": 1.407,
"eval_steps_per_second": 0.088,
"step": 26000
},
{
"epoch": 12.70751953125,
"grad_norm": 5.0800299644470215,
"learning_rate": 1e-05,
"loss": 0.4279,
"step": 26025
},
{
"epoch": 12.7197265625,
"grad_norm": 4.715578556060791,
"learning_rate": 1e-05,
"loss": 0.4126,
"step": 26050
},
{
"epoch": 12.73193359375,
"grad_norm": 3.818558692932129,
"learning_rate": 1e-05,
"loss": 0.4176,
"step": 26075
},
{
"epoch": 12.744140625,
"grad_norm": 3.0944199562072754,
"learning_rate": 1e-05,
"loss": 0.4155,
"step": 26100
},
{
"epoch": 12.75634765625,
"grad_norm": 4.057784080505371,
"learning_rate": 1e-05,
"loss": 0.4359,
"step": 26125
},
{
"epoch": 12.7685546875,
"grad_norm": 2.12882661819458,
"learning_rate": 1e-05,
"loss": 0.427,
"step": 26150
},
{
"epoch": 12.78076171875,
"grad_norm": 2.9268953800201416,
"learning_rate": 1e-05,
"loss": 0.4437,
"step": 26175
},
{
"epoch": 12.79296875,
"grad_norm": 3.1372625827789307,
"learning_rate": 1e-05,
"loss": 0.3814,
"step": 26200
},
{
"epoch": 12.80517578125,
"grad_norm": 3.171339511871338,
"learning_rate": 1e-05,
"loss": 0.461,
"step": 26225
},
{
"epoch": 12.8173828125,
"grad_norm": 2.2062418460845947,
"learning_rate": 1e-05,
"loss": 0.4147,
"step": 26250
},
{
"epoch": 12.82958984375,
"grad_norm": 4.490868091583252,
"learning_rate": 1e-05,
"loss": 0.3782,
"step": 26275
},
{
"epoch": 12.841796875,
"grad_norm": 4.018364429473877,
"learning_rate": 1e-05,
"loss": 0.4402,
"step": 26300
},
{
"epoch": 12.85400390625,
"grad_norm": 3.0183207988739014,
"learning_rate": 1e-05,
"loss": 0.4353,
"step": 26325
},
{
"epoch": 12.8662109375,
"grad_norm": 3.209418535232544,
"learning_rate": 1e-05,
"loss": 0.4267,
"step": 26350
},
{
"epoch": 12.87841796875,
"grad_norm": 2.75201678276062,
"learning_rate": 1e-05,
"loss": 0.4098,
"step": 26375
},
{
"epoch": 12.890625,
"grad_norm": 2.8569531440734863,
"learning_rate": 1e-05,
"loss": 0.4134,
"step": 26400
},
{
"epoch": 12.90283203125,
"grad_norm": 3.4323573112487793,
"learning_rate": 1e-05,
"loss": 0.392,
"step": 26425
},
{
"epoch": 12.9150390625,
"grad_norm": 2.4016165733337402,
"learning_rate": 1e-05,
"loss": 0.407,
"step": 26450
},
{
"epoch": 12.92724609375,
"grad_norm": 2.4917731285095215,
"learning_rate": 1e-05,
"loss": 0.3779,
"step": 26475
},
{
"epoch": 12.939453125,
"grad_norm": 2.2973673343658447,
"learning_rate": 1e-05,
"loss": 0.4162,
"step": 26500
},
{
"epoch": 12.939453125,
"eval_cer": 20.10909090909091,
"eval_loss": 0.36156314611434937,
"eval_normalized_cer": 16.029593094944513,
"eval_runtime": 88.7052,
"eval_samples_per_second": 1.443,
"eval_steps_per_second": 0.09,
"step": 26500
},
{
"epoch": 12.95166015625,
"grad_norm": 2.8456501960754395,
"learning_rate": 1e-05,
"loss": 0.4392,
"step": 26525
},
{
"epoch": 12.9638671875,
"grad_norm": 2.975572109222412,
"learning_rate": 1e-05,
"loss": 0.4097,
"step": 26550
},
{
"epoch": 12.97607421875,
"grad_norm": 4.956911563873291,
"learning_rate": 1e-05,
"loss": 0.4098,
"step": 26575
},
{
"epoch": 12.98828125,
"grad_norm": 3.6981606483459473,
"learning_rate": 1e-05,
"loss": 0.4451,
"step": 26600
},
{
"epoch": 13.0,
"step": 26624,
"total_flos": 6.747472049681203e+20,
"train_loss": 0.031781960636950456,
"train_runtime": 2625.017,
"train_samples_per_second": 162.279,
"train_steps_per_second": 10.142
}
],
"logging_steps": 25,
"max_steps": 26624,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.747472049681203e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}