{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.986666666666667,
  "eval_steps": 500,
  "global_step": 336,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 9.412764922702348,
      "learning_rate": 2.9411764705882354e-05,
      "loss": 0.5102,
      "step": 10
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 6.313179166401434,
      "learning_rate": 5.882352941176471e-05,
      "loss": 1.1989,
      "step": 20
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 5.655906942216565,
      "learning_rate": 8.823529411764706e-05,
      "loss": 0.7143,
      "step": 30
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 7.038149436459667,
      "learning_rate": 9.990263847374976e-05,
      "loss": 0.9001,
      "step": 40
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 13.082350769847622,
      "learning_rate": 9.930902394260747e-05,
      "loss": 0.9829,
      "step": 50
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 5.046437234782843,
      "learning_rate": 9.818229479678158e-05,
      "loss": 0.9798,
      "step": 60
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 5.146826578374137,
      "learning_rate": 9.653463289927411e-05,
      "loss": 1.0628,
      "step": 70
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 4.011140449496279,
      "learning_rate": 9.438385228425938e-05,
      "loss": 1.0443,
      "step": 80
    },
    {
      "epoch": 0.8,
      "grad_norm": 10.96171152773486,
      "learning_rate": 9.175320655700406e-05,
      "loss": 1.0457,
      "step": 90
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 4.961498008924087,
      "learning_rate": 8.86711374827494e-05,
      "loss": 1.0249,
      "step": 100
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 4.6475501141852265,
      "learning_rate": 8.517096748273951e-05,
      "loss": 1.0873,
      "step": 110
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 4.428876971621146,
      "learning_rate": 8.129053936203687e-05,
      "loss": 0.7218,
      "step": 120
    },
    {
      "epoch": 1.1555555555555554,
      "grad_norm": 5.801138115283359,
      "learning_rate": 7.707180716428237e-05,
      "loss": 0.6494,
      "step": 130
    },
    {
      "epoch": 1.2444444444444445,
      "grad_norm": 4.0351599378267595,
      "learning_rate": 7.256038257695687e-05,
      "loss": 0.6379,
      "step": 140
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 4.0864268190783894,
      "learning_rate": 6.780504179127734e-05,
      "loss": 0.6164,
      "step": 150
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 4.708947394754259,
      "learning_rate": 6.28571981484123e-05,
      "loss": 0.597,
      "step": 160
    },
    {
      "epoch": 1.511111111111111,
      "grad_norm": 4.279448887973702,
      "learning_rate": 5.7770346273610254e-05,
      "loss": 0.5859,
      "step": 170
    },
    {
      "epoch": 1.6,
      "grad_norm": 2.92470191003172,
      "learning_rate": 5.2599483708099016e-05,
      "loss": 0.5793,
      "step": 180
    },
    {
      "epoch": 1.6888888888888889,
      "grad_norm": 3.4738210484808136,
      "learning_rate": 4.740051629190099e-05,
      "loss": 0.5843,
      "step": 190
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 3.8095727115118607,
      "learning_rate": 4.2229653726389765e-05,
      "loss": 0.4927,
      "step": 200
    },
    {
      "epoch": 1.8666666666666667,
      "grad_norm": 39.411006077166576,
      "learning_rate": 3.714280185158771e-05,
      "loss": 0.5614,
      "step": 210
    },
    {
      "epoch": 1.9555555555555557,
      "grad_norm": 3.0318398508133986,
      "learning_rate": 3.219495820872265e-05,
      "loss": 0.6168,
      "step": 220
    },
    {
      "epoch": 2.0444444444444443,
      "grad_norm": 2.323712025359315,
      "learning_rate": 2.7439617423043145e-05,
      "loss": 0.4263,
      "step": 230
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 2.2885384111375395,
      "learning_rate": 2.2928192835717644e-05,
      "loss": 0.2691,
      "step": 240
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 1.914625424128763,
      "learning_rate": 1.8709460637963123e-05,
      "loss": 0.2317,
      "step": 250
    },
    {
      "epoch": 2.311111111111111,
      "grad_norm": 1.7713461830093022,
      "learning_rate": 1.4829032517260489e-05,
      "loss": 0.2562,
      "step": 260
    },
    {
      "epoch": 2.4,
      "grad_norm": 2.437839693921665,
      "learning_rate": 1.132886251725061e-05,
      "loss": 0.2132,
      "step": 270
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 1.8118456334838464,
      "learning_rate": 8.246793442995954e-06,
      "loss": 0.2589,
      "step": 280
    },
    {
      "epoch": 2.5777777777777775,
      "grad_norm": 2.5235712198342437,
      "learning_rate": 5.616147715740611e-06,
      "loss": 0.2305,
      "step": 290
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 2.6667275190466975,
      "learning_rate": 3.465367100725908e-06,
      "loss": 0.2083,
      "step": 300
    },
    {
      "epoch": 2.7555555555555555,
      "grad_norm": 2.5602982214331,
      "learning_rate": 1.8177052032184283e-06,
      "loss": 0.212,
      "step": 310
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 2.356039432068881,
      "learning_rate": 6.909760573925561e-07,
      "loss": 0.2055,
      "step": 320
    },
    {
      "epoch": 2.9333333333333336,
      "grad_norm": 1.9207416625625227,
      "learning_rate": 9.73615262502503e-08,
      "loss": 0.1871,
      "step": 330
    },
    {
      "epoch": 2.986666666666667,
      "step": 336,
      "total_flos": 1204578902016.0,
      "train_loss": 0.5951760939898945,
      "train_runtime": 1003.6822,
      "train_samples_per_second": 2.69,
      "train_steps_per_second": 0.335
    }
  ],
  "logging_steps": 10,
  "max_steps": 336,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1204578902016.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}