byt5-beat-align-base / trainer_state.json
This model allows generating multiple words aligned to a beat
5f7d7d2
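For reference, the file below can be fetched and loaded programmatically. This is a minimal sketch, assuming the repository id melzohbi/byt5-beat-align-base and the root-level filename trainer_state.json, both inferred from the page header above; huggingface_hub is the only extra dependency.

# Minimal sketch: download this trainer_state.json from the Hub and load it.
# Assumption: repo id "melzohbi/byt5-beat-align-base" and the root-level
# filename are inferred from the page header, not confirmed by this repo.
import json

from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="melzohbi/byt5-beat-align-base",
    filename="trainer_state.json",
)

with open(path) as f:
    trainer_state = json.load(f)

# e.g. final epoch and global step recorded by the Trainer
print(trainer_state["epoch"], trainer_state["global_step"])

The raw file contents follow.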
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9999225970246295,
"global_step": 32296,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 9.287925696594427e-07,
"loss": 7.2762,
"step": 1
},
{
"epoch": 0.03,
"learning_rate": 0.00023219814241486067,
"loss": 3.2842,
"step": 250
},
{
"epoch": 0.06,
"learning_rate": 0.0002999773154682302,
"loss": 1.4905,
"step": 500
},
{
"epoch": 0.09,
"learning_rate": 0.0002998679962481985,
"loss": 1.2796,
"step": 750
},
{
"epoch": 0.12,
"learning_rate": 0.0002996682496269463,
"loss": 1.1723,
"step": 1000
},
{
"epoch": 0.15,
"learning_rate": 0.00029937819612765605,
"loss": 1.1113,
"step": 1250
},
{
"epoch": 0.19,
"learning_rate": 0.0002989980107629042,
"loss": 1.0653,
"step": 1500
},
{
"epoch": 0.22,
"learning_rate": 0.00029852792292906226,
"loss": 1.0398,
"step": 1750
},
{
"epoch": 0.25,
"learning_rate": 0.0002979682162678829,
"loss": 1.0151,
"step": 2000
},
{
"epoch": 0.28,
"learning_rate": 0.0002973192284953569,
"loss": 1.0004,
"step": 2250
},
{
"epoch": 0.31,
"learning_rate": 0.00029658135119794136,
"loss": 0.9809,
"step": 2500
},
{
"epoch": 0.34,
"learning_rate": 0.00029575502959628444,
"loss": 0.9701,
"step": 2750
},
{
"epoch": 0.37,
"learning_rate": 0.00029484076227658787,
"loss": 0.9533,
"step": 3000
},
{
"epoch": 0.4,
"learning_rate": 0.00029383910088976985,
"loss": 0.9449,
"step": 3250
},
{
"epoch": 0.43,
"learning_rate": 0.00029275064981860977,
"loss": 0.9328,
"step": 3500
},
{
"epoch": 0.46,
"learning_rate": 0.0002915760658130755,
"loss": 0.9233,
"step": 3750
},
{
"epoch": 0.5,
"learning_rate": 0.0002903160575940534,
"loss": 0.9198,
"step": 4000
},
{
"epoch": 0.53,
"learning_rate": 0.00028897138542572,
"loss": 0.9065,
"step": 4250
},
{
"epoch": 0.56,
"learning_rate": 0.00028754286065681367,
"loss": 0.9031,
"step": 4500
},
{
"epoch": 0.59,
"learning_rate": 0.0002860313452310827,
"loss": 0.8971,
"step": 4750
},
{
"epoch": 0.62,
"learning_rate": 0.0002844377511672054,
"loss": 0.8871,
"step": 5000
},
{
"epoch": 0.65,
"learning_rate": 0.0002827630400084958,
"loss": 0.8796,
"step": 5250
},
{
"epoch": 0.68,
"learning_rate": 0.00028100822224272776,
"loss": 0.8793,
"step": 5500
},
{
"epoch": 0.71,
"learning_rate": 0.0002791743566924259,
"loss": 0.8675,
"step": 5750
},
{
"epoch": 0.74,
"learning_rate": 0.0002772625498759935,
"loss": 0.8755,
"step": 6000
},
{
"epoch": 0.77,
"learning_rate": 0.00027527395534006084,
"loss": 0.8654,
"step": 6250
},
{
"epoch": 0.8,
"learning_rate": 0.0002732097729634576,
"loss": 0.8639,
"step": 6500
},
{
"epoch": 0.84,
"learning_rate": 0.0002710712482332301,
"loss": 0.8581,
"step": 6750
},
{
"epoch": 0.87,
"learning_rate": 0.0002688596714931383,
"loss": 0.8543,
"step": 7000
},
{
"epoch": 0.9,
"learning_rate": 0.0002665763771650883,
"loss": 0.849,
"step": 7250
},
{
"epoch": 0.93,
"learning_rate": 0.00026422274294396817,
"loss": 0.8407,
"step": 7500
},
{
"epoch": 0.96,
"learning_rate": 0.0002618001889663738,
"loss": 0.842,
"step": 7750
},
{
"epoch": 0.99,
"learning_rate": 0.0002593101769537267,
"loss": 0.8345,
"step": 8000
},
{
"epoch": 1.0,
"eval_corv_score": 0.9549480169426261,
"eval_lev_score": 0.9913295381925039,
"eval_runtime": 119.8225,
"eval_samples_per_second": 43.347,
"eval_steps_per_second": 2.712,
"eval_t5-prp-fluency": 7.594483043964092,
"step": 8074
},
{
"epoch": 1.02,
"learning_rate": 0.0002567542093302999,
"loss": 0.8192,
"step": 8250
},
{
"epoch": 1.05,
"learning_rate": 0.00025413382831668464,
"loss": 0.7998,
"step": 8500
},
{
"epoch": 1.08,
"learning_rate": 0.000251450614999245,
"loss": 0.7973,
"step": 8750
},
{
"epoch": 1.11,
"learning_rate": 0.0002487061883761213,
"loss": 0.7956,
"step": 9000
},
{
"epoch": 1.15,
"learning_rate": 0.0002459022043803584,
"loss": 0.7908,
"step": 9250
},
{
"epoch": 1.18,
"learning_rate": 0.00024304035488074857,
"loss": 0.791,
"step": 9500
},
{
"epoch": 1.21,
"learning_rate": 0.0002401223666609909,
"loss": 0.7884,
"step": 9750
},
{
"epoch": 1.24,
"learning_rate": 0.00023715000037778392,
"loss": 0.7937,
"step": 10000
},
{
"epoch": 1.27,
"learning_rate": 0.00023412504949848025,
"loss": 0.7863,
"step": 10250
},
{
"epoch": 1.3,
"learning_rate": 0.0002310493392189435,
"loss": 0.7907,
"step": 10500
},
{
"epoch": 1.33,
"learning_rate": 0.000227924725362261,
"loss": 0.7781,
"step": 10750
},
{
"epoch": 1.36,
"learning_rate": 0.0002247530932589765,
"loss": 0.7778,
"step": 11000
},
{
"epoch": 1.39,
"learning_rate": 0.00022153635660951925,
"loss": 0.7752,
"step": 11250
},
{
"epoch": 1.42,
"learning_rate": 0.00021827645632951426,
"loss": 0.7694,
"step": 11500
},
{
"epoch": 1.46,
"learning_rate": 0.00021497535937867236,
"loss": 0.771,
"step": 11750
},
{
"epoch": 1.49,
"learning_rate": 0.0002116350575739653,
"loss": 0.7709,
"step": 12000
},
{
"epoch": 1.52,
"learning_rate": 0.0002082575663878024,
"loss": 0.768,
"step": 12250
},
{
"epoch": 1.55,
"learning_rate": 0.00020484492373193437,
"loss": 0.7678,
"step": 12500
},
{
"epoch": 1.58,
"learning_rate": 0.00020139918872781742,
"loss": 0.7605,
"step": 12750
},
{
"epoch": 1.61,
"learning_rate": 0.00019792244046417973,
"loss": 0.7641,
"step": 13000
},
{
"epoch": 1.64,
"learning_rate": 0.0001944167767425403,
"loss": 0.7559,
"step": 13250
},
{
"epoch": 1.67,
"learning_rate": 0.00019088431281143674,
"loss": 0.7604,
"step": 13500
},
{
"epoch": 1.7,
"learning_rate": 0.00018732718009012596,
"loss": 0.7573,
"step": 13750
},
{
"epoch": 1.73,
"learning_rate": 0.0001837475248825276,
"loss": 0.7552,
"step": 14000
},
{
"epoch": 1.76,
"learning_rate": 0.00018014750708218673,
"loss": 0.7494,
"step": 14250
},
{
"epoch": 1.8,
"learning_rate": 0.00017652929886903608,
"loss": 0.7521,
"step": 14500
},
{
"epoch": 1.83,
"learning_rate": 0.00017289508339874587,
"loss": 0.7454,
"step": 14750
},
{
"epoch": 1.86,
"learning_rate": 0.00016924705348545055,
"loss": 0.7496,
"step": 15000
},
{
"epoch": 1.89,
"learning_rate": 0.00016558741027864748,
"loss": 0.7424,
"step": 15250
},
{
"epoch": 1.92,
"learning_rate": 0.00016191836193506709,
"loss": 0.7434,
"step": 15500
},
{
"epoch": 1.95,
"learning_rate": 0.0001582421222863143,
"loss": 0.7399,
"step": 15750
},
{
"epoch": 1.98,
"learning_rate": 0.00015456090950308622,
"loss": 0.7354,
"step": 16000
},
{
"epoch": 2.0,
"eval_corv_score": 0.9751636503658067,
"eval_lev_score": 0.9953171820578365,
"eval_runtime": 115.5905,
"eval_samples_per_second": 44.935,
"eval_steps_per_second": 2.812,
"eval_t5-prp-fluency": 7.509893327859732,
"step": 16148
},
{
"epoch": 2.01,
"learning_rate": 0.0001508769447567713,
"loss": 0.7177,
"step": 16250
},
{
"epoch": 2.04,
"learning_rate": 0.00014719245087923855,
"loss": 0.6866,
"step": 16500
},
{
"epoch": 2.07,
"learning_rate": 0.0001435096510216243,
"loss": 0.6838,
"step": 16750
},
{
"epoch": 2.11,
"learning_rate": 0.0001398307673129266,
"loss": 0.6827,
"step": 17000
},
{
"epoch": 2.14,
"learning_rate": 0.0001361580195192162,
"loss": 0.6826,
"step": 17250
},
{
"epoch": 2.17,
"learning_rate": 0.0001324936237042735,
"loss": 0.6816,
"step": 17500
},
{
"epoch": 2.2,
"learning_rate": 0.00012883979089245917,
"loss": 0.6859,
"step": 17750
},
{
"epoch": 2.23,
"learning_rate": 0.00012519872573462553,
"loss": 0.6783,
"step": 18000
},
{
"epoch": 2.26,
"learning_rate": 0.00012157262517787366,
"loss": 0.6826,
"step": 18250
},
{
"epoch": 2.29,
"learning_rate": 0.0001179636771399586,
"loss": 0.6756,
"step": 18500
},
{
"epoch": 2.32,
"learning_rate": 0.00011437405918914297,
"loss": 0.6782,
"step": 18750
},
{
"epoch": 2.35,
"learning_rate": 0.00011080593723029505,
"loss": 0.6762,
"step": 19000
},
{
"epoch": 2.38,
"learning_rate": 0.00010726146419802426,
"loss": 0.6736,
"step": 19250
},
{
"epoch": 2.42,
"learning_rate": 0.00010374277875764294,
"loss": 0.6716,
"step": 19500
},
{
"epoch": 2.45,
"learning_rate": 0.0001002520040147377,
"loss": 0.6695,
"step": 19750
},
{
"epoch": 2.48,
"learning_rate": 9.679124623412919e-05,
"loss": 0.6705,
"step": 20000
},
{
"epoch": 2.51,
"learning_rate": 9.336259356899358e-05,
"loss": 0.6656,
"step": 20250
},
{
"epoch": 2.54,
"learning_rate": 8.99681148009116e-05,
"loss": 0.6703,
"step": 20500
},
{
"epoch": 2.57,
"learning_rate": 8.660985809160688e-05,
"loss": 0.6694,
"step": 20750
},
{
"epoch": 2.6,
"learning_rate": 8.328984974712501e-05,
"loss": 0.661,
"step": 21000
},
{
"epoch": 2.63,
"learning_rate": 8.001009299520067e-05,
"loss": 0.6628,
"step": 21250
},
{
"epoch": 2.66,
"learning_rate": 7.677256677654927e-05,
"loss": 0.6637,
"step": 21500
},
{
"epoch": 2.69,
"learning_rate": 7.35792245508131e-05,
"loss": 0.6618,
"step": 21750
},
{
"epoch": 2.72,
"learning_rate": 7.043199311788208e-05,
"loss": 0.6656,
"step": 22000
},
{
"epoch": 2.76,
"learning_rate": 6.733277145530084e-05,
"loss": 0.6639,
"step": 22250
},
{
"epoch": 2.79,
"learning_rate": 6.428342957246265e-05,
"loss": 0.6534,
"step": 22500
},
{
"epoch": 2.82,
"learning_rate": 6.128580738228292e-05,
"loss": 0.6596,
"step": 22750
},
{
"epoch": 2.85,
"learning_rate": 5.8341713591031753e-05,
"loss": 0.6578,
"step": 23000
},
{
"epoch": 2.88,
"learning_rate": 5.545292460699615e-05,
"loss": 0.6567,
"step": 23250
},
{
"epoch": 2.91,
"learning_rate": 5.2621183468630204e-05,
"loss": 0.6557,
"step": 23500
},
{
"epoch": 2.94,
"learning_rate": 4.9848198792840026e-05,
"loss": 0.6566,
"step": 23750
},
{
"epoch": 2.97,
"learning_rate": 4.713564374403786e-05,
"loss": 0.6569,
"step": 24000
},
{
"epoch": 3.0,
"eval_corv_score": 0.9797843665768194,
"eval_lev_score": 0.99618999924753,
"eval_runtime": 155.6188,
"eval_samples_per_second": 33.376,
"eval_steps_per_second": 2.088,
"eval_t5-prp-fluency": 7.498019453195425,
"step": 24222
},
{
"epoch": 3.0,
"learning_rate": 4.448515502458757e-05,
"loss": 0.6509,
"step": 24250
},
{
"epoch": 3.03,
"learning_rate": 4.189833188725071e-05,
"loss": 0.6021,
"step": 24500
},
{
"epoch": 3.07,
"learning_rate": 3.9376735170228755e-05,
"loss": 0.5938,
"step": 24750
},
{
"epoch": 3.1,
"learning_rate": 3.6921886355384136e-05,
"loss": 0.594,
"step": 25000
},
{
"epoch": 3.13,
"learning_rate": 3.45352666502079e-05,
"loss": 0.5941,
"step": 25250
},
{
"epoch": 3.16,
"learning_rate": 3.221831609408837e-05,
"loss": 0.5967,
"step": 25500
},
{
"epoch": 3.19,
"learning_rate": 2.9972432689419552e-05,
"loss": 0.5897,
"step": 25750
},
{
"epoch": 3.22,
"learning_rate": 2.779897155807418e-05,
"loss": 0.5969,
"step": 26000
},
{
"epoch": 3.25,
"learning_rate": 2.569924412374956e-05,
"loss": 0.5944,
"step": 26250
},
{
"epoch": 3.28,
"learning_rate": 2.367451732068059e-05,
"loss": 0.598,
"step": 26500
},
{
"epoch": 3.31,
"learning_rate": 2.1726012829196434e-05,
"loss": 0.592,
"step": 26750
},
{
"epoch": 3.34,
"learning_rate": 1.9854906338582588e-05,
"loss": 0.5907,
"step": 27000
},
{
"epoch": 3.38,
"learning_rate": 1.8062326837693165e-05,
"loss": 0.5923,
"step": 27250
},
{
"epoch": 3.41,
"learning_rate": 1.6349355933741237e-05,
"loss": 0.5915,
"step": 27500
},
{
"epoch": 3.44,
"learning_rate": 1.4717027199678216e-05,
"loss": 0.5901,
"step": 27750
},
{
"epoch": 3.47,
"learning_rate": 1.3166325550556416e-05,
"loss": 0.5891,
"step": 28000
},
{
"epoch": 3.5,
"learning_rate": 1.1698186649250535e-05,
"loss": 0.5903,
"step": 28250
},
{
"epoch": 3.53,
"learning_rate": 1.0313496341897181e-05,
"loss": 0.587,
"step": 28500
},
{
"epoch": 3.56,
"learning_rate": 9.013090123392674e-06,
"loss": 0.5943,
"step": 28750
},
{
"epoch": 3.59,
"learning_rate": 7.797752633271908e-06,
"loss": 0.5919,
"step": 29000
},
{
"epoch": 3.62,
"learning_rate": 6.6682171822722675e-06,
"loss": 0.589,
"step": 29250
},
{
"epoch": 3.65,
"learning_rate": 5.625165309868323e-06,
"loss": 0.5911,
"step": 29500
},
{
"epoch": 3.68,
"learning_rate": 4.669226373044388e-06,
"loss": 0.5929,
"step": 29750
},
{
"epoch": 3.72,
"learning_rate": 3.8009771665527712e-06,
"loss": 0.5899,
"step": 30000
},
{
"epoch": 3.75,
"learning_rate": 3.0209415748873254e-06,
"loss": 0.592,
"step": 30250
},
{
"epoch": 3.78,
"learning_rate": 2.3295902561817516e-06,
"loss": 0.5888,
"step": 30500
},
{
"epoch": 3.81,
"learning_rate": 1.7273403582237277e-06,
"loss": 0.5904,
"step": 30750
},
{
"epoch": 3.84,
"learning_rate": 1.2145552667561676e-06,
"loss": 0.5886,
"step": 31000
},
{
"epoch": 3.87,
"learning_rate": 7.915443862174409e-07,
"loss": 0.5904,
"step": 31250
},
{
"epoch": 3.9,
"learning_rate": 4.585629530527768e-07,
"loss": 0.5841,
"step": 31500
},
{
"epoch": 3.93,
"learning_rate": 2.1581188170960438e-07,
"loss": 0.5886,
"step": 31750
},
{
"epoch": 3.96,
"learning_rate": 6.343764340977497e-08,
"loss": 0.5857,
"step": 32000
},
{
"epoch": 3.99,
"learning_rate": 1.5321777716570394e-09,
"loss": 0.5867,
"step": 32250
},
{
"epoch": 4.0,
"eval_corv_score": 0.9830573738929534,
"eval_lev_score": 0.9963118068762173,
"eval_runtime": 116.0292,
"eval_samples_per_second": 44.765,
"eval_steps_per_second": 2.801,
"eval_t5-prp-fluency": 7.493759269714356,
"step": 32296
},
{
"epoch": 4.0,
"step": 32296,
"total_flos": 7.67688706167423e+17,
"train_loss": 0.7648685576303306,
"train_runtime": 24220.9861,
"train_samples_per_second": 170.687,
"train_steps_per_second": 1.333
}
],
"max_steps": 32296,
"num_train_epochs": 4,
"total_flos": 7.67688706167423e+17,
"trial_name": null,
"trial_params": null
}
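For quick inspection, the per-step entries in log_history above (those carrying a loss and a learning_rate) can be separated from the per-epoch eval entries and plotted. A minimal sketch, assuming a local copy saved as trainer_state.json; matplotlib is an illustrative choice here, not a dependency of this repo.

# Minimal sketch: plot the training-loss curve and learning-rate schedule
# recorded in log_history, then print the per-epoch eval metrics.
# Assumptions: a local file named "trainer_state.json" and matplotlib as an
# illustrative plotting dependency (not part of this repo).
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs carry "loss"/"learning_rate"; eval entries carry
# "eval_*" keys instead, and the final entry is a run summary.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_lev_score" in e]

steps = [e["step"] for e in train_logs]
fig, (ax_loss, ax_lr) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_loss.plot(steps, [e["loss"] for e in train_logs])
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, [e["learning_rate"] for e in train_logs])
ax_lr.set_ylabel("learning rate")
ax_lr.set_xlabel("global step")
plt.tight_layout()
plt.show()

for e in eval_logs:
    print(f'epoch {e["epoch"]:.0f}: '
          f'corv={e["eval_corv_score"]:.4f}, lev={e["eval_lev_score"]:.4f}')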