{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 67130,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 0.000297765529569492,
      "loss": 4.2198,
      "step": 500
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.00029553105913898405,
      "loss": 3.702,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00029329658870847603,
      "loss": 3.5122,
      "step": 1500
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00029106211827796807,
      "loss": 3.4147,
      "step": 2000
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002888276478474601,
      "loss": 3.3907,
      "step": 2500
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002865931774169522,
      "loss": 3.3163,
      "step": 3000
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002843587069864442,
      "loss": 3.253,
      "step": 3500
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0002821242365559362,
      "loss": 3.248,
      "step": 4000
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00027988976612542825,
      "loss": 3.2532,
      "step": 4500
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0002776552956949203,
      "loss": 3.2179,
      "step": 5000
    },
    {
      "epoch": 0.74,
      "eval_gen_len": 13.5337,
      "eval_loss": 2.7812678813934326,
      "eval_rouge1": 25.8006,
      "eval_rouge2": 9.3551,
      "eval_rougeL": 23.386,
      "eval_rougeLsum": 23.5287,
      "eval_runtime": 531.59,
      "eval_samples_per_second": 18.811,
      "eval_steps_per_second": 2.351,
      "step": 5000
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0002754208252644123,
      "loss": 3.1697,
      "step": 5500
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0002731863548339043,
      "loss": 3.1744,
      "step": 6000
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.00027095188440339634,
      "loss": 3.1526,
      "step": 6500
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0002687174139728884,
      "loss": 3.0155,
      "step": 7000
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0002664829435423804,
      "loss": 2.9585,
      "step": 7500
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00026424847311187245,
      "loss": 2.9227,
      "step": 8000
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.0002620140026813645,
      "loss": 2.9469,
      "step": 8500
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00025977953225085653,
      "loss": 2.9259,
      "step": 9000
    },
    {
      "epoch": 1.42,
      "learning_rate": 0.00025754506182034857,
      "loss": 2.9517,
      "step": 9500
    },
    {
      "epoch": 1.49,
      "learning_rate": 0.0002553105913898406,
      "loss": 2.9248,
      "step": 10000
    },
    {
      "epoch": 1.49,
      "eval_gen_len": 13.243,
      "eval_loss": 2.691446542739868,
      "eval_rouge1": 27.0409,
      "eval_rouge2": 10.0228,
      "eval_rougeL": 24.4581,
      "eval_rougeLsum": 24.6197,
      "eval_runtime": 528.3278,
      "eval_samples_per_second": 18.928,
      "eval_steps_per_second": 2.366,
      "step": 10000
    },
    {
      "epoch": 1.56,
      "learning_rate": 0.0002530761209593326,
      "loss": 2.9173,
      "step": 10500
    },
    {
      "epoch": 1.64,
      "learning_rate": 0.0002508416505288246,
      "loss": 2.923,
      "step": 11000
    },
    {
      "epoch": 1.71,
      "learning_rate": 0.00024860718009831666,
      "loss": 2.9318,
      "step": 11500
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.0002463727096678087,
      "loss": 2.901,
      "step": 12000
    },
    {
      "epoch": 1.86,
      "learning_rate": 0.00024413823923730073,
      "loss": 2.8678,
      "step": 12500
    },
    {
      "epoch": 1.94,
      "learning_rate": 0.00024190376880679277,
      "loss": 2.8576,
      "step": 13000
    },
    {
      "epoch": 2.01,
      "learning_rate": 0.0002396692983762848,
      "loss": 2.8734,
      "step": 13500
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00023743482794577684,
      "loss": 2.6712,
      "step": 14000
    },
    {
      "epoch": 2.16,
      "learning_rate": 0.00023520035751526885,
      "loss": 2.6779,
      "step": 14500
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.0002329658870847609,
      "loss": 2.6813,
      "step": 15000
    },
    {
      "epoch": 2.23,
      "eval_gen_len": 14.3052,
      "eval_loss": 2.6461832523345947,
      "eval_rouge1": 27.5333,
      "eval_rouge2": 10.3641,
      "eval_rougeL": 24.8696,
      "eval_rougeLsum": 25.0564,
      "eval_runtime": 527.1253,
      "eval_samples_per_second": 18.971,
      "eval_steps_per_second": 2.371,
      "step": 15000
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.00023073141665425293,
      "loss": 2.6964,
      "step": 15500
    },
    {
      "epoch": 2.38,
      "learning_rate": 0.00022849694622374496,
      "loss": 2.6816,
      "step": 16000
    },
    {
      "epoch": 2.46,
      "learning_rate": 0.000226262475793237,
      "loss": 2.669,
      "step": 16500
    },
    {
      "epoch": 2.53,
      "learning_rate": 0.000224028005362729,
      "loss": 2.6871,
      "step": 17000
    },
    {
      "epoch": 2.61,
      "learning_rate": 0.00022179353493222105,
      "loss": 2.6783,
      "step": 17500
    },
    {
      "epoch": 2.68,
      "learning_rate": 0.00021955906450171308,
      "loss": 2.7131,
      "step": 18000
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00021732459407120512,
      "loss": 2.7029,
      "step": 18500
    },
    {
      "epoch": 2.83,
      "learning_rate": 0.00021509012364069713,
      "loss": 2.7081,
      "step": 19000
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00021285565321018917,
      "loss": 2.6989,
      "step": 19500
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.0002106211827796812,
      "loss": 2.691,
      "step": 20000
    },
    {
      "epoch": 2.98,
      "eval_gen_len": 14.5279,
      "eval_loss": 2.6205477714538574,
      "eval_rouge1": 28.3681,
      "eval_rouge2": 10.8961,
      "eval_rougeL": 25.5144,
      "eval_rougeLsum": 25.722,
      "eval_runtime": 530.0749,
      "eval_samples_per_second": 18.865,
      "eval_steps_per_second": 2.358,
      "step": 20000
    },
    {
      "epoch": 3.05,
      "learning_rate": 0.00020838671234917324,
      "loss": 2.5377,
      "step": 20500
    },
    {
      "epoch": 3.13,
      "learning_rate": 0.00020615224191866528,
      "loss": 2.4832,
      "step": 21000
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.0002039177714881573,
      "loss": 2.4958,
      "step": 21500
    },
    {
      "epoch": 3.28,
      "learning_rate": 0.00020168330105764932,
      "loss": 2.4949,
      "step": 22000
    },
    {
      "epoch": 3.35,
      "learning_rate": 0.00019944883062714136,
      "loss": 2.4796,
      "step": 22500
    },
    {
      "epoch": 3.43,
      "learning_rate": 0.0001972143601966334,
      "loss": 2.5323,
      "step": 23000
    },
    {
      "epoch": 3.5,
      "learning_rate": 0.0001949798897661254,
      "loss": 2.515,
      "step": 23500
    },
    {
      "epoch": 3.58,
      "learning_rate": 0.00019274541933561744,
      "loss": 2.5291,
      "step": 24000
    },
    {
      "epoch": 3.65,
      "learning_rate": 0.00019051094890510948,
      "loss": 2.5206,
      "step": 24500
    },
    {
      "epoch": 3.72,
      "learning_rate": 0.00018827647847460152,
      "loss": 2.5127,
      "step": 25000
    },
    {
      "epoch": 3.72,
      "eval_gen_len": 14.0721,
      "eval_loss": 2.6042990684509277,
      "eval_rouge1": 28.5979,
      "eval_rouge2": 11.0477,
      "eval_rougeL": 25.759,
      "eval_rougeLsum": 25.9605,
      "eval_runtime": 523.9369,
      "eval_samples_per_second": 19.086,
      "eval_steps_per_second": 2.386,
      "step": 25000
    },
    {
      "epoch": 3.8,
      "learning_rate": 0.00018604200804409355,
      "loss": 2.5203,
      "step": 25500
    },
    {
      "epoch": 3.87,
      "learning_rate": 0.00018380753761358556,
      "loss": 2.5064,
      "step": 26000
    },
    {
      "epoch": 3.95,
      "learning_rate": 0.0001815730671830776,
      "loss": 2.4892,
      "step": 26500
    },
    {
      "epoch": 4.02,
      "learning_rate": 0.00017933859675256964,
      "loss": 2.4728,
      "step": 27000
    },
    {
      "epoch": 4.1,
      "learning_rate": 0.00017710412632206167,
      "loss": 2.319,
      "step": 27500
    },
    {
      "epoch": 4.17,
      "learning_rate": 0.00017486965589155368,
      "loss": 2.3135,
      "step": 28000
    },
    {
      "epoch": 4.25,
      "learning_rate": 0.00017263518546104572,
      "loss": 2.339,
      "step": 28500
    },
    {
      "epoch": 4.32,
      "learning_rate": 0.00017040071503053776,
      "loss": 2.3818,
      "step": 29000
    },
    {
      "epoch": 4.39,
      "learning_rate": 0.0001681662446000298,
      "loss": 2.3551,
      "step": 29500
    },
    {
      "epoch": 4.47,
      "learning_rate": 0.00016593177416952183,
      "loss": 2.3331,
      "step": 30000
    },
    {
      "epoch": 4.47,
      "eval_gen_len": 14.4519,
      "eval_loss": 2.6282742023468018,
      "eval_rouge1": 28.9106,
      "eval_rouge2": 11.3727,
      "eval_rougeL": 25.9338,
      "eval_rougeLsum": 26.1387,
      "eval_runtime": 544.5243,
      "eval_samples_per_second": 18.365,
      "eval_steps_per_second": 2.296,
      "step": 30000
    },
    {
      "epoch": 4.54,
      "learning_rate": 0.00016369730373901384,
      "loss": 2.3632,
      "step": 30500
    },
    {
      "epoch": 4.62,
      "learning_rate": 0.00016146283330850588,
      "loss": 2.3592,
      "step": 31000
    },
    {
      "epoch": 4.69,
      "learning_rate": 0.00015922836287799791,
      "loss": 2.3468,
      "step": 31500
    },
    {
      "epoch": 4.77,
      "learning_rate": 0.00015699389244748995,
      "loss": 2.361,
      "step": 32000
    },
    {
      "epoch": 4.84,
      "learning_rate": 0.00015475942201698196,
      "loss": 2.3444,
      "step": 32500
    },
    {
      "epoch": 4.92,
      "learning_rate": 0.000152524951586474,
      "loss": 2.3435,
      "step": 33000
    },
    {
      "epoch": 4.99,
      "learning_rate": 0.00015029048115596604,
      "loss": 2.3755,
      "step": 33500
    },
    {
      "epoch": 5.06,
      "learning_rate": 0.00014805601072545804,
      "loss": 2.2407,
      "step": 34000
    },
    {
      "epoch": 5.14,
      "learning_rate": 0.00014582154029495008,
      "loss": 2.2036,
      "step": 34500
    },
    {
      "epoch": 5.21,
      "learning_rate": 0.00014358706986444212,
      "loss": 2.2034,
      "step": 35000
    },
    {
      "epoch": 5.21,
      "eval_gen_len": 13.8715,
      "eval_loss": 2.6400413513183594,
      "eval_rouge1": 29.099,
      "eval_rouge2": 11.2376,
      "eval_rougeL": 26.1221,
      "eval_rougeLsum": 26.3568,
      "eval_runtime": 529.9312,
      "eval_samples_per_second": 18.87,
      "eval_steps_per_second": 2.359,
      "step": 35000
    },
    {
      "epoch": 5.29,
      "learning_rate": 0.00014135259943393416,
      "loss": 2.1988,
      "step": 35500
    },
    {
      "epoch": 5.36,
      "learning_rate": 0.0001391181290034262,
      "loss": 2.2228,
      "step": 36000
    },
    {
      "epoch": 5.44,
      "learning_rate": 0.0001368836585729182,
      "loss": 2.2078,
      "step": 36500
    },
    {
      "epoch": 5.51,
      "learning_rate": 0.00013464918814241024,
      "loss": 2.2399,
      "step": 37000
    },
    {
      "epoch": 5.59,
      "learning_rate": 0.00013241471771190228,
      "loss": 2.2287,
      "step": 37500
    },
    {
      "epoch": 5.66,
      "learning_rate": 0.0001301802472813943,
      "loss": 2.2174,
      "step": 38000
    },
    {
      "epoch": 5.74,
      "learning_rate": 0.00012794577685088632,
      "loss": 2.2216,
      "step": 38500
    },
    {
      "epoch": 5.81,
      "learning_rate": 0.00012571130642037836,
      "loss": 2.2196,
      "step": 39000
    },
    {
      "epoch": 5.88,
      "learning_rate": 0.00012347683598987037,
      "loss": 2.2432,
      "step": 39500
    },
    {
      "epoch": 5.96,
      "learning_rate": 0.00012124236555936242,
      "loss": 2.2137,
      "step": 40000
    },
    {
      "epoch": 5.96,
      "eval_gen_len": 14.5981,
      "eval_loss": 2.634021759033203,
      "eval_rouge1": 29.2641,
      "eval_rouge2": 11.3565,
      "eval_rougeL": 26.2012,
      "eval_rougeLsum": 26.4214,
      "eval_runtime": 532.2281,
      "eval_samples_per_second": 18.789,
      "eval_steps_per_second": 2.349,
      "step": 40000
    },
    {
      "epoch": 6.03,
      "learning_rate": 0.00011900789512885446,
      "loss": 2.1445,
      "step": 40500
    },
    {
      "epoch": 6.11,
      "learning_rate": 0.00011677342469834648,
      "loss": 2.0664,
      "step": 41000
    },
    {
      "epoch": 6.18,
      "learning_rate": 0.00011453895426783852,
      "loss": 2.094,
      "step": 41500
    },
    {
      "epoch": 6.26,
      "learning_rate": 0.00011230448383733054,
      "loss": 2.1048,
      "step": 42000
    },
    {
      "epoch": 6.33,
      "learning_rate": 0.00011007001340682258,
      "loss": 2.1001,
      "step": 42500
    },
    {
      "epoch": 6.41,
      "learning_rate": 0.0001078355429763146,
      "loss": 2.1141,
      "step": 43000
    },
    {
      "epoch": 6.48,
      "learning_rate": 0.00010560107254580664,
      "loss": 2.0866,
      "step": 43500
    },
    {
      "epoch": 6.55,
      "learning_rate": 0.00010336660211529866,
      "loss": 2.0916,
      "step": 44000
    },
    {
      "epoch": 6.63,
      "learning_rate": 0.0001011321316847907,
      "loss": 2.1056,
      "step": 44500
    },
    {
      "epoch": 6.7,
      "learning_rate": 9.889766125428273e-05,
      "loss": 2.1104,
      "step": 45000
    },
    {
      "epoch": 6.7,
      "eval_gen_len": 13.888,
      "eval_loss": 2.6362199783325195,
      "eval_rouge1": 29.6204,
      "eval_rouge2": 11.6807,
      "eval_rougeL": 26.5976,
      "eval_rougeLsum": 26.8261,
      "eval_runtime": 537.398,
      "eval_samples_per_second": 18.608,
      "eval_steps_per_second": 2.326,
      "step": 45000
    },
    {
      "epoch": 6.78,
      "learning_rate": 9.666319082377476e-05,
      "loss": 2.1206,
      "step": 45500
    },
    {
      "epoch": 6.85,
      "learning_rate": 9.44287203932668e-05,
      "loss": 2.1124,
      "step": 46000
    },
    {
      "epoch": 6.93,
      "learning_rate": 9.219424996275882e-05,
      "loss": 2.1211,
      "step": 46500
    },
    {
      "epoch": 7.0,
      "learning_rate": 8.995977953225085e-05,
      "loss": 2.1063,
      "step": 47000
    },
    {
      "epoch": 7.08,
      "learning_rate": 8.772530910174288e-05,
      "loss": 1.9698,
      "step": 47500
    },
    {
      "epoch": 7.15,
      "learning_rate": 8.549083867123491e-05,
      "loss": 1.9693,
      "step": 48000
    },
    {
      "epoch": 7.22,
      "learning_rate": 8.325636824072694e-05,
      "loss": 2.0098,
      "step": 48500
    },
    {
      "epoch": 7.3,
      "learning_rate": 8.102189781021897e-05,
      "loss": 1.9976,
      "step": 49000
    },
    {
      "epoch": 7.37,
      "learning_rate": 7.8787427379711e-05,
      "loss": 2.0059,
      "step": 49500
    },
    {
      "epoch": 7.45,
      "learning_rate": 7.655295694920303e-05,
      "loss": 2.003,
      "step": 50000
    },
    {
      "epoch": 7.45,
      "eval_gen_len": 14.2246,
      "eval_loss": 2.6541028022766113,
      "eval_rouge1": 29.5679,
      "eval_rouge2": 11.6334,
      "eval_rougeL": 26.5095,
      "eval_rougeLsum": 26.7418,
      "eval_runtime": 534.455,
      "eval_samples_per_second": 18.711,
      "eval_steps_per_second": 2.339,
      "step": 50000
    },
    {
      "epoch": 7.52,
      "learning_rate": 7.431848651869506e-05,
      "loss": 1.9752,
      "step": 50500
    },
    {
      "epoch": 7.6,
      "learning_rate": 7.20840160881871e-05,
      "loss": 2.0409,
      "step": 51000
    },
    {
      "epoch": 7.67,
      "learning_rate": 6.984954565767912e-05,
      "loss": 2.0181,
      "step": 51500
    },
    {
      "epoch": 7.75,
      "learning_rate": 6.761507522717115e-05,
      "loss": 1.9967,
      "step": 52000
    },
    {
      "epoch": 7.82,
      "learning_rate": 6.538060479666319e-05,
      "loss": 1.9883,
      "step": 52500
    },
    {
      "epoch": 7.9,
      "learning_rate": 6.314613436615521e-05,
      "loss": 2.0096,
      "step": 53000
    },
    {
      "epoch": 7.97,
      "learning_rate": 6.091166393564725e-05,
      "loss": 2.0075,
      "step": 53500
    },
    {
      "epoch": 8.04,
      "learning_rate": 5.867719350513928e-05,
      "loss": 1.9409,
      "step": 54000
    },
    {
      "epoch": 8.12,
      "learning_rate": 5.6442723074631304e-05,
      "loss": 1.9271,
      "step": 54500
    },
    {
      "epoch": 8.19,
      "learning_rate": 5.4208252644123334e-05,
      "loss": 1.8955,
      "step": 55000
    },
    {
      "epoch": 8.19,
      "eval_gen_len": 14.3902,
      "eval_loss": 2.694044828414917,
      "eval_rouge1": 29.6748,
      "eval_rouge2": 11.5897,
      "eval_rougeL": 26.4862,
      "eval_rougeLsum": 26.7581,
      "eval_runtime": 535.4767,
      "eval_samples_per_second": 18.675,
      "eval_steps_per_second": 2.334,
      "step": 55000
    },
    {
      "epoch": 8.27,
      "learning_rate": 5.1973782213615364e-05,
      "loss": 1.9114,
      "step": 55500
    },
    {
      "epoch": 8.34,
      "learning_rate": 4.9739311783107394e-05,
      "loss": 1.9337,
      "step": 56000
    },
    {
      "epoch": 8.42,
      "learning_rate": 4.7504841352599425e-05,
      "loss": 1.9088,
      "step": 56500
    },
    {
      "epoch": 8.49,
      "learning_rate": 4.5270370922091455e-05,
      "loss": 1.9104,
      "step": 57000
    },
    {
      "epoch": 8.57,
      "learning_rate": 4.303590049158349e-05,
      "loss": 1.9167,
      "step": 57500
    },
    {
      "epoch": 8.64,
      "learning_rate": 4.080143006107552e-05,
      "loss": 1.9146,
      "step": 58000
    },
    {
      "epoch": 8.71,
      "learning_rate": 3.856695963056755e-05,
      "loss": 1.8965,
      "step": 58500
    },
    {
      "epoch": 8.79,
      "learning_rate": 3.633248920005958e-05,
      "loss": 1.9186,
      "step": 59000
    },
    {
      "epoch": 8.86,
      "learning_rate": 3.409801876955161e-05,
      "loss": 1.9153,
      "step": 59500
    },
    {
      "epoch": 8.94,
      "learning_rate": 3.186354833904364e-05,
      "loss": 1.912,
      "step": 60000
    },
    {
      "epoch": 8.94,
      "eval_gen_len": 14.3574,
      "eval_loss": 2.6882503032684326,
      "eval_rouge1": 29.7285,
      "eval_rouge2": 11.6448,
      "eval_rougeL": 26.5368,
      "eval_rougeLsum": 26.7806,
      "eval_runtime": 531.6791,
      "eval_samples_per_second": 18.808,
      "eval_steps_per_second": 2.351,
      "step": 60000
    },
    {
      "epoch": 9.01,
      "learning_rate": 2.9629077908535672e-05,
      "loss": 1.9182,
      "step": 60500
    },
    {
      "epoch": 9.09,
      "learning_rate": 2.7394607478027705e-05,
      "loss": 1.8865,
      "step": 61000
    },
    {
      "epoch": 9.16,
      "learning_rate": 2.5160137047519735e-05,
      "loss": 1.8585,
      "step": 61500
    },
    {
      "epoch": 9.24,
      "learning_rate": 2.2925666617011765e-05,
      "loss": 1.8478,
      "step": 62000
    },
    {
      "epoch": 9.31,
      "learning_rate": 2.0691196186503795e-05,
      "loss": 1.8604,
      "step": 62500
    },
    {
      "epoch": 9.38,
      "learning_rate": 1.845672575599583e-05,
      "loss": 1.8599,
      "step": 63000
    },
    {
      "epoch": 9.46,
      "learning_rate": 1.622225532548786e-05,
      "loss": 1.8661,
      "step": 63500
    },
    {
      "epoch": 9.53,
      "learning_rate": 1.3987784894979889e-05,
      "loss": 1.8651,
      "step": 64000
    },
    {
      "epoch": 9.61,
      "learning_rate": 1.1753314464471919e-05,
      "loss": 1.8657,
      "step": 64500
    },
    {
      "epoch": 9.68,
      "learning_rate": 9.518844033963949e-06,
      "loss": 1.8581,
      "step": 65000
    },
    {
      "epoch": 9.68,
      "eval_gen_len": 14.3821,
      "eval_loss": 2.687427282333374,
      "eval_rouge1": 29.7373,
      "eval_rouge2": 11.6532,
      "eval_rougeL": 26.4799,
      "eval_rougeLsum": 26.738,
      "eval_runtime": 539.441,
      "eval_samples_per_second": 18.538,
      "eval_steps_per_second": 2.317,
      "step": 65000
    },
    {
      "epoch": 9.76,
      "learning_rate": 7.284373603455981e-06,
      "loss": 1.8359,
      "step": 65500
    },
    {
      "epoch": 9.83,
      "learning_rate": 5.04990317294801e-06,
      "loss": 1.8372,
      "step": 66000
    },
    {
      "epoch": 9.91,
      "learning_rate": 2.815432742440042e-06,
      "loss": 1.8536,
      "step": 66500
    },
    {
      "epoch": 9.98,
      "learning_rate": 5.809623119320721e-07,
      "loss": 1.8537,
      "step": 67000
    },
    {
      "epoch": 10.0,
      "step": 67130,
      "total_flos": 2.0718221980336128e+17,
      "train_loss": 2.3931242498576544,
      "train_runtime": 39455.2858,
      "train_samples_per_second": 13.611,
      "train_steps_per_second": 1.701
    }
  ],
  "max_steps": 67130,
  "num_train_epochs": 10,
  "total_flos": 2.0718221980336128e+17,
  "trial_name": null,
  "trial_params": null
}