{ "best_metric": 0.7326732673267327, "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-4/checkpoint-96", "epoch": 7.0, "eval_steps": 500, "global_step": 336, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.1, "grad_norm": 1.2733114957809448, "learning_rate": 2.2702186710865246e-07, "loss": 0.7025, "step": 5 }, { "epoch": 0.21, "grad_norm": 1.243804931640625, "learning_rate": 4.5404373421730493e-07, "loss": 0.6974, "step": 10 }, { "epoch": 0.31, "grad_norm": 1.7711552381515503, "learning_rate": 6.810656013259573e-07, "loss": 0.696, "step": 15 }, { "epoch": 0.42, "grad_norm": 1.1453403234481812, "learning_rate": 9.080874684346099e-07, "loss": 0.6989, "step": 20 }, { "epoch": 0.52, "grad_norm": 1.2729355096817017, "learning_rate": 1.1351093355432624e-06, "loss": 0.6968, "step": 25 }, { "epoch": 0.62, "grad_norm": 1.1592165231704712, "learning_rate": 1.3621312026519146e-06, "loss": 0.6959, "step": 30 }, { "epoch": 0.73, "grad_norm": 1.1798148155212402, "learning_rate": 1.589153069760567e-06, "loss": 0.6952, "step": 35 }, { "epoch": 0.83, "grad_norm": 2.1216671466827393, "learning_rate": 1.8161749368692197e-06, "loss": 0.6886, "step": 40 }, { "epoch": 0.94, "grad_norm": 1.3416370153427124, "learning_rate": 2.043196803977872e-06, "loss": 0.6864, "step": 45 }, { "epoch": 1.0, "eval_f1": 0.72, "eval_loss": 0.688262939453125, "eval_runtime": 1.3468, "eval_samples_per_second": 47.521, "eval_steps_per_second": 5.94, "step": 48 }, { "epoch": 1.04, "grad_norm": 2.1856281757354736, "learning_rate": 2.169320063482679e-06, "loss": 0.6917, "step": 50 }, { "epoch": 1.15, "grad_norm": 1.4077153205871582, "learning_rate": 2.1440954115817176e-06, "loss": 0.6884, "step": 55 }, { "epoch": 1.25, "grad_norm": 2.1792664527893066, "learning_rate": 2.1188707596807562e-06, "loss": 0.6668, "step": 60 }, { "epoch": 1.35, "grad_norm": 1.0386197566986084, "learning_rate": 2.093646107779795e-06, "loss": 0.6694, "step": 65 }, { "epoch": 1.46, "grad_norm": 2.0565919876098633, "learning_rate": 2.0684214558788335e-06, "loss": 0.6561, "step": 70 }, { "epoch": 1.56, "grad_norm": 1.2978509664535522, "learning_rate": 2.043196803977872e-06, "loss": 0.6789, "step": 75 }, { "epoch": 1.67, "grad_norm": 2.058328628540039, "learning_rate": 2.0179721520769108e-06, "loss": 0.6633, "step": 80 }, { "epoch": 1.77, "grad_norm": 0.6023226976394653, "learning_rate": 1.9927475001759494e-06, "loss": 0.6655, "step": 85 }, { "epoch": 1.88, "grad_norm": 0.5510762929916382, "learning_rate": 1.967522848274988e-06, "loss": 0.6622, "step": 90 }, { "epoch": 1.98, "grad_norm": 1.098602533340454, "learning_rate": 1.9422981963740267e-06, "loss": 0.6633, "step": 95 }, { "epoch": 2.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6816024780273438, "eval_runtime": 1.3765, "eval_samples_per_second": 46.493, "eval_steps_per_second": 5.812, "step": 96 }, { "epoch": 2.08, "grad_norm": 0.9589098691940308, "learning_rate": 1.9170735444730654e-06, "loss": 0.659, "step": 100 }, { "epoch": 2.19, "grad_norm": 1.070695161819458, "learning_rate": 1.8918488925721038e-06, "loss": 0.6313, "step": 105 }, { "epoch": 2.29, "grad_norm": 0.9913639426231384, "learning_rate": 1.8666242406711424e-06, "loss": 0.6652, "step": 110 }, { "epoch": 2.4, "grad_norm": 1.0632878541946411, "learning_rate": 1.841399588770181e-06, "loss": 0.673, "step": 115 }, { "epoch": 2.5, "grad_norm": 2.1036579608917236, "learning_rate": 1.8161749368692197e-06, "loss": 0.6451, "step": 120 }, { "epoch": 2.6, "grad_norm": 
1.08384108543396, "learning_rate": 1.7909502849682583e-06, "loss": 0.6322, "step": 125 }, { "epoch": 2.71, "grad_norm": 0.9407000541687012, "learning_rate": 1.765725633067297e-06, "loss": 0.6755, "step": 130 }, { "epoch": 2.81, "grad_norm": 0.9016568660736084, "learning_rate": 1.7405009811663356e-06, "loss": 0.5985, "step": 135 }, { "epoch": 2.92, "grad_norm": 1.1134448051452637, "learning_rate": 1.7152763292653743e-06, "loss": 0.603, "step": 140 }, { "epoch": 3.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6800689697265625, "eval_runtime": 1.3861, "eval_samples_per_second": 46.173, "eval_steps_per_second": 5.772, "step": 144 }, { "epoch": 3.02, "grad_norm": 0.7627719640731812, "learning_rate": 1.6900516773644127e-06, "loss": 0.6557, "step": 145 }, { "epoch": 3.12, "grad_norm": 0.9291415214538574, "learning_rate": 1.6648270254634511e-06, "loss": 0.6219, "step": 150 }, { "epoch": 3.23, "grad_norm": 0.9248765707015991, "learning_rate": 1.6396023735624898e-06, "loss": 0.6325, "step": 155 }, { "epoch": 3.33, "grad_norm": 0.9842573404312134, "learning_rate": 1.6143777216615284e-06, "loss": 0.6521, "step": 160 }, { "epoch": 3.44, "grad_norm": 0.8689214587211609, "learning_rate": 1.589153069760567e-06, "loss": 0.5929, "step": 165 }, { "epoch": 3.54, "grad_norm": 1.0012000799179077, "learning_rate": 1.5639284178596057e-06, "loss": 0.584, "step": 170 }, { "epoch": 3.65, "grad_norm": 0.7438368797302246, "learning_rate": 1.5387037659586443e-06, "loss": 0.6813, "step": 175 }, { "epoch": 3.75, "grad_norm": 1.8603870868682861, "learning_rate": 1.513479114057683e-06, "loss": 0.6099, "step": 180 }, { "epoch": 3.85, "grad_norm": 0.9918416738510132, "learning_rate": 1.4882544621567216e-06, "loss": 0.6192, "step": 185 }, { "epoch": 3.96, "grad_norm": 1.9146322011947632, "learning_rate": 1.4630298102557603e-06, "loss": 0.6472, "step": 190 }, { "epoch": 4.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6818161010742188, "eval_runtime": 1.3841, "eval_samples_per_second": 46.239, "eval_steps_per_second": 5.78, "step": 192 }, { "epoch": 4.06, "grad_norm": 0.9502781629562378, "learning_rate": 1.437805158354799e-06, "loss": 0.6447, "step": 195 }, { "epoch": 4.17, "grad_norm": 0.8570067286491394, "learning_rate": 1.4125805064538375e-06, "loss": 0.5306, "step": 200 }, { "epoch": 4.27, "grad_norm": 0.8097484111785889, "learning_rate": 1.3873558545528762e-06, "loss": 0.6202, "step": 205 }, { "epoch": 4.38, "grad_norm": 2.0106472969055176, "learning_rate": 1.3621312026519146e-06, "loss": 0.6705, "step": 210 }, { "epoch": 4.48, "grad_norm": 1.090775489807129, "learning_rate": 1.3369065507509533e-06, "loss": 0.6297, "step": 215 }, { "epoch": 4.58, "grad_norm": 0.8988145589828491, "learning_rate": 1.311681898849992e-06, "loss": 0.5896, "step": 220 }, { "epoch": 4.69, "grad_norm": 0.9149978756904602, "learning_rate": 1.2864572469490305e-06, "loss": 0.6156, "step": 225 }, { "epoch": 4.79, "grad_norm": 1.9398412704467773, "learning_rate": 1.2612325950480692e-06, "loss": 0.6305, "step": 230 }, { "epoch": 4.9, "grad_norm": 0.9217966794967651, "learning_rate": 1.2360079431471078e-06, "loss": 0.5943, "step": 235 }, { "epoch": 5.0, "grad_norm": 0.9083653688430786, "learning_rate": 1.2107832912461465e-06, "loss": 0.6386, "step": 240 }, { "epoch": 5.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6846389770507812, "eval_runtime": 1.4094, "eval_samples_per_second": 45.409, "eval_steps_per_second": 5.676, "step": 240 }, { "epoch": 5.1, "grad_norm": 0.9323675036430359, "learning_rate": 1.1855586393451851e-06, "loss": 0.5779, 
"step": 245 }, { "epoch": 5.21, "grad_norm": 0.7549787163734436, "learning_rate": 1.1603339874442238e-06, "loss": 0.5948, "step": 250 }, { "epoch": 5.31, "grad_norm": 0.8535837531089783, "learning_rate": 1.1351093355432624e-06, "loss": 0.6928, "step": 255 }, { "epoch": 5.42, "grad_norm": 1.2038137912750244, "learning_rate": 1.109884683642301e-06, "loss": 0.5887, "step": 260 }, { "epoch": 5.52, "grad_norm": 0.9501279592514038, "learning_rate": 1.0846600317413395e-06, "loss": 0.5776, "step": 265 }, { "epoch": 5.62, "grad_norm": 0.7421719431877136, "learning_rate": 1.0594353798403781e-06, "loss": 0.6734, "step": 270 }, { "epoch": 5.73, "grad_norm": 0.8555863499641418, "learning_rate": 1.0342107279394168e-06, "loss": 0.6399, "step": 275 }, { "epoch": 5.83, "grad_norm": 0.8841156363487244, "learning_rate": 1.0089860760384554e-06, "loss": 0.6173, "step": 280 }, { "epoch": 5.94, "grad_norm": 0.8565478324890137, "learning_rate": 9.83761424137494e-07, "loss": 0.5537, "step": 285 }, { "epoch": 6.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6864242553710938, "eval_runtime": 1.3622, "eval_samples_per_second": 46.983, "eval_steps_per_second": 5.873, "step": 288 }, { "epoch": 6.04, "grad_norm": 0.8750139474868774, "learning_rate": 9.585367722365327e-07, "loss": 0.5531, "step": 290 }, { "epoch": 6.15, "grad_norm": 1.0445302724838257, "learning_rate": 9.333121203355712e-07, "loss": 0.638, "step": 295 }, { "epoch": 6.25, "grad_norm": 0.7958914637565613, "learning_rate": 9.080874684346099e-07, "loss": 0.547, "step": 300 }, { "epoch": 6.35, "grad_norm": 0.9992254376411438, "learning_rate": 8.828628165336485e-07, "loss": 0.6425, "step": 305 }, { "epoch": 6.46, "grad_norm": 0.8400682806968689, "learning_rate": 8.576381646326871e-07, "loss": 0.6955, "step": 310 }, { "epoch": 6.56, "grad_norm": 0.742438793182373, "learning_rate": 8.324135127317256e-07, "loss": 0.6473, "step": 315 }, { "epoch": 6.67, "grad_norm": 0.6693254113197327, "learning_rate": 8.071888608307642e-07, "loss": 0.603, "step": 320 }, { "epoch": 6.77, "grad_norm": 1.0816401243209839, "learning_rate": 7.819642089298028e-07, "loss": 0.6053, "step": 325 }, { "epoch": 6.88, "grad_norm": 0.7275277376174927, "learning_rate": 7.567395570288415e-07, "loss": 0.612, "step": 330 }, { "epoch": 6.98, "grad_norm": 0.7834873795509338, "learning_rate": 7.315149051278801e-07, "loss": 0.55, "step": 335 }, { "epoch": 7.0, "eval_f1": 0.7326732673267327, "eval_loss": 0.6889228820800781, "eval_runtime": 1.3769, "eval_samples_per_second": 46.483, "eval_steps_per_second": 5.81, "step": 336 } ], "logging_steps": 5, "max_steps": 480, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 500, "total_flos": 5116387614670704.0, "train_batch_size": 4, "trial_name": null, "trial_params": { "learning_rate": 2.1794099242430636e-06, "per_device_train_batch_size": 4 } }