{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.9979123173277662, "eval_steps": 500, "global_step": 239, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0041753653444676405, "grad_norm": 14820.435529068707, "learning_rate": 1.2499999999999999e-05, "loss": 60.135, "step": 1 }, { "epoch": 0.020876826722338204, "grad_norm": 1628.1311380537702, "learning_rate": 6.25e-05, "loss": 50.6887, "step": 5 }, { "epoch": 0.04175365344467641, "grad_norm": 3033.800304830903, "learning_rate": 0.000125, "loss": 42.1741, "step": 10 }, { "epoch": 0.06263048016701461, "grad_norm": 6954.121232870651, "learning_rate": 0.00018749999999999998, "loss": 161.1634, "step": 15 }, { "epoch": 0.08350730688935282, "grad_norm": 850.7915596439383, "learning_rate": 0.00025, "loss": 187.8921, "step": 20 }, { "epoch": 0.10438413361169102, "grad_norm": 51.008981364388355, "learning_rate": 0.0002999839868651235, "loss": 27.0222, "step": 25 }, { "epoch": 0.12526096033402923, "grad_norm": 41.28593832910529, "learning_rate": 0.000299423886051382, "loss": 40.1282, "step": 30 }, { "epoch": 0.14613778705636743, "grad_norm": 94.54405893422229, "learning_rate": 0.0002980665441538907, "loss": 22.8182, "step": 35 }, { "epoch": 0.16701461377870563, "grad_norm": 117.0436511760194, "learning_rate": 0.0002959192031789579, "loss": 19.1922, "step": 40 }, { "epoch": 0.18789144050104384, "grad_norm": 23.24704464882293, "learning_rate": 0.00029299332011978107, "loss": 17.9784, "step": 45 }, { "epoch": 0.20876826722338204, "grad_norm": 67.09115479950934, "learning_rate": 0.0002893045058284311, "loss": 13.938, "step": 50 }, { "epoch": 0.22964509394572025, "grad_norm": 74.81050241825953, "learning_rate": 0.00028487244172520246, "loss": 12.678, "step": 55 }, { "epoch": 0.25052192066805845, "grad_norm": 36.783670859376656, "learning_rate": 0.0002797207747897198, "loss": 11.5948, "step": 60 }, { "epoch": 0.27139874739039666, "grad_norm": 42.67794856216472, "learning_rate": 0.0002738769913940706, "loss": 9.6311, "step": 65 }, { "epoch": 0.29227557411273486, "grad_norm": 20.131225618062707, "learning_rate": 0.0002673722706511174, "loss": 8.6553, "step": 70 }, { "epoch": 0.31315240083507306, "grad_norm": 16.501356621775457, "learning_rate": 0.0002602413180604401, "loss": 7.8996, "step": 75 }, { "epoch": 0.33402922755741127, "grad_norm": 17.24824317039284, "learning_rate": 0.00025252218033947993, "loss": 7.1388, "step": 80 }, { "epoch": 0.35490605427974947, "grad_norm": 23.28427284101539, "learning_rate": 0.0002442560424278399, "loss": 6.7571, "step": 85 }, { "epoch": 0.3757828810020877, "grad_norm": 19.752003570940577, "learning_rate": 0.00023548700774781242, "loss": 6.4415, "step": 90 }, { "epoch": 0.3966597077244259, "grad_norm": 16.62982088461208, "learning_rate": 0.00022626186289353913, "loss": 6.4479, "step": 95 }, { "epoch": 0.4175365344467641, "grad_norm": 23.009578526601246, "learning_rate": 0.0002166298280042877, "loss": 6.1347, "step": 100 }, { "epoch": 0.4384133611691023, "grad_norm": 9.902525054887667, "learning_rate": 0.00020664229415371266, "loss": 5.868, "step": 105 }, { "epoch": 0.4592901878914405, "grad_norm": 18.872092670717567, "learning_rate": 0.0001963525491562421, "loss": 5.8684, "step": 110 }, { "epoch": 0.4801670146137787, "grad_norm": 10.396862801643927, "learning_rate": 0.00018581549325353126, "loss": 5.6768, "step": 115 }, { "epoch": 0.5010438413361169, "grad_norm": 6.643173772718449, "learning_rate": 0.00017508734619791966, "loss": 
5.5378, "step": 120 }, { "epoch": 0.5219206680584552, "grad_norm": 13.620597347405528, "learning_rate": 0.00016422534729572738, "loss": 5.4243, "step": 125 }, { "epoch": 0.5427974947807933, "grad_norm": 12.675575738004211, "learning_rate": 0.0001532874500107902, "loss": 5.4237, "step": 130 }, { "epoch": 0.5636743215031316, "grad_norm": 10.600479885243871, "learning_rate": 0.00014233201275765494, "loss": 5.3795, "step": 135 }, { "epoch": 0.5845511482254697, "grad_norm": 14.207759763111122, "learning_rate": 0.0001314174875341878, "loss": 5.2682, "step": 140 }, { "epoch": 0.605427974947808, "grad_norm": 10.28585644469174, "learning_rate": 0.00012060210805487529, "loss": 5.1446, "step": 145 }, { "epoch": 0.6263048016701461, "grad_norm": 9.411071111902643, "learning_rate": 0.00010994357904876106, "loss": 5.0323, "step": 150 }, { "epoch": 0.6471816283924844, "grad_norm": 4.20316220773891, "learning_rate": 9.949876837974944e-05, "loss": 4.9314, "step": 155 }, { "epoch": 0.6680584551148225, "grad_norm": 6.642144688442386, "learning_rate": 8.932340363194595e-05, "loss": 4.9206, "step": 160 }, { "epoch": 0.6889352818371608, "grad_norm": 7.2411357694832255, "learning_rate": 7.947177477888472e-05, "loss": 4.8218, "step": 165 }, { "epoch": 0.7098121085594989, "grad_norm": 6.883273869392802, "learning_rate": 6.999644452302975e-05, "loss": 4.6982, "step": 170 }, { "epoch": 0.7306889352818372, "grad_norm": 5.045481934015123, "learning_rate": 6.0947967851014405e-05, "loss": 4.6601, "step": 175 }, { "epoch": 0.7515657620041754, "grad_norm": 4.832077424408084, "learning_rate": 5.237462230091467e-05, "loss": 4.5969, "step": 180 }, { "epoch": 0.7724425887265136, "grad_norm": 4.500645176472541, "learning_rate": 4.432215038069449e-05, "loss": 4.548, "step": 185 }, { "epoch": 0.7933194154488518, "grad_norm": 3.503698684667856, "learning_rate": 3.6833515512134606e-05, "loss": 4.5618, "step": 190 }, { "epoch": 0.81419624217119, "grad_norm": 3.680197433632794, "learning_rate": 2.9948672802388135e-05, "loss": 4.5023, "step": 195 }, { "epoch": 0.8350730688935282, "grad_norm": 2.8773090613106302, "learning_rate": 2.3704355866196373e-05, "loss": 4.5226, "step": 200 }, { "epoch": 0.8559498956158664, "grad_norm": 2.5614101608626467, "learning_rate": 1.813388083616068e-05, "loss": 4.4463, "step": 205 }, { "epoch": 0.8768267223382046, "grad_norm": 2.2102224877308676, "learning_rate": 1.326696860675981e-05, "loss": 4.3724, "step": 210 }, { "epoch": 0.8977035490605428, "grad_norm": 2.056590025798714, "learning_rate": 9.129586260518634e-06, "loss": 4.3747, "step": 215 }, { "epoch": 0.918580375782881, "grad_norm": 1.7949115931981623, "learning_rate": 5.743808522387544e-06, "loss": 4.3585, "step": 220 }, { "epoch": 0.9394572025052192, "grad_norm": 1.5441796176488383, "learning_rate": 3.1276999815337544e-06, "loss": 4.3784, "step": 225 }, { "epoch": 0.9603340292275574, "grad_norm": 1.549016859105234, "learning_rate": 1.2952187089419642e-06, "loss": 4.3766, "step": 230 }, { "epoch": 0.9812108559498957, "grad_norm": 1.5003100935642444, "learning_rate": 2.5614178506644934e-07, "loss": 4.3115, "step": 235 }, { "epoch": 0.9979123173277662, "eval_loss": 5.734466552734375, "eval_runtime": 1.4349, "eval_samples_per_second": 1.394, "eval_steps_per_second": 0.697, "step": 239 }, { "epoch": 0.9979123173277662, "step": 239, "total_flos": 8193589641216.0, "train_loss": 2.8353221366595025, "train_runtime": 2240.0339, "train_samples_per_second": 3.416, "train_steps_per_second": 0.107 } ], "logging_steps": 5, "max_steps": 239, 
"num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8193589641216.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }