{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 5958,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016784155756965426,
"grad_norm": 90801.3515625,
"learning_rate": 4.916079221215173e-05,
"loss": 0.9925,
"step": 100
},
{
"epoch": 0.03356831151393085,
"grad_norm": 83764.0078125,
"learning_rate": 4.832158442430346e-05,
"loss": 0.9625,
"step": 200
},
{
"epoch": 0.050352467270896276,
"grad_norm": 84057.3125,
"learning_rate": 4.748237663645519e-05,
"loss": 0.934,
"step": 300
},
{
"epoch": 0.0671366230278617,
"grad_norm": 76437.0546875,
"learning_rate": 4.664316884860692e-05,
"loss": 0.9354,
"step": 400
},
{
"epoch": 0.08392077878482712,
"grad_norm": 76537.3359375,
"learning_rate": 4.5803961060758646e-05,
"loss": 0.934,
"step": 500
},
{
"epoch": 0.10070493454179255,
"grad_norm": 86227.09375,
"learning_rate": 4.4964753272910375e-05,
"loss": 0.9027,
"step": 600
},
{
"epoch": 0.11748909029875797,
"grad_norm": 86583.5078125,
"learning_rate": 4.4125545485062104e-05,
"loss": 0.8981,
"step": 700
},
{
"epoch": 0.1342732460557234,
"grad_norm": 59143.1171875,
"learning_rate": 4.328633769721383e-05,
"loss": 0.9329,
"step": 800
},
{
"epoch": 0.1510574018126888,
"grad_norm": 77289.375,
"learning_rate": 4.244712990936556e-05,
"loss": 0.9125,
"step": 900
},
{
"epoch": 0.16784155756965424,
"grad_norm": 66578.9765625,
"learning_rate": 4.160792212151729e-05,
"loss": 0.9198,
"step": 1000
},
{
"epoch": 0.18462571332661967,
"grad_norm": 64711.90234375,
"learning_rate": 4.076871433366902e-05,
"loss": 0.8985,
"step": 1100
},
{
"epoch": 0.2014098690835851,
"grad_norm": 74255.734375,
"learning_rate": 3.992950654582075e-05,
"loss": 0.9154,
"step": 1200
},
{
"epoch": 0.21819402484055053,
"grad_norm": 76627.859375,
"learning_rate": 3.9090298757972476e-05,
"loss": 0.907,
"step": 1300
},
{
"epoch": 0.23497818059751593,
"grad_norm": 69068.90625,
"learning_rate": 3.8251090970124205e-05,
"loss": 0.911,
"step": 1400
},
{
"epoch": 0.25176233635448136,
"grad_norm": 63122.66796875,
"learning_rate": 3.7411883182275934e-05,
"loss": 0.8597,
"step": 1500
},
{
"epoch": 0.2685464921114468,
"grad_norm": 76006.2265625,
"learning_rate": 3.657267539442766e-05,
"loss": 0.884,
"step": 1600
},
{
"epoch": 0.2853306478684122,
"grad_norm": 64542.19140625,
"learning_rate": 3.573346760657939e-05,
"loss": 0.8884,
"step": 1700
},
{
"epoch": 0.3021148036253776,
"grad_norm": 65309.5546875,
"learning_rate": 3.489425981873112e-05,
"loss": 0.8707,
"step": 1800
},
{
"epoch": 0.3188989593823431,
"grad_norm": 81841.5546875,
"learning_rate": 3.405505203088285e-05,
"loss": 0.8818,
"step": 1900
},
{
"epoch": 0.3356831151393085,
"grad_norm": 74731.21875,
"learning_rate": 3.321584424303458e-05,
"loss": 0.8635,
"step": 2000
},
{
"epoch": 0.35246727089627394,
"grad_norm": 59748.52734375,
"learning_rate": 3.2376636455186307e-05,
"loss": 0.8827,
"step": 2100
},
{
"epoch": 0.36925142665323935,
"grad_norm": 78755.6796875,
"learning_rate": 3.1537428667338035e-05,
"loss": 0.8564,
"step": 2200
},
{
"epoch": 0.38603558241020475,
"grad_norm": 68127.890625,
"learning_rate": 3.0698220879489764e-05,
"loss": 0.8619,
"step": 2300
},
{
"epoch": 0.4028197381671702,
"grad_norm": 80441.515625,
"learning_rate": 2.9859013091641493e-05,
"loss": 0.8421,
"step": 2400
},
{
"epoch": 0.4196038939241356,
"grad_norm": 69039.3203125,
"learning_rate": 2.9019805303793218e-05,
"loss": 0.8639,
"step": 2500
},
{
"epoch": 0.43638804968110106,
"grad_norm": 70787.9921875,
"learning_rate": 2.818059751594495e-05,
"loss": 0.8428,
"step": 2600
},
{
"epoch": 0.45317220543806647,
"grad_norm": 73120.625,
"learning_rate": 2.734138972809668e-05,
"loss": 0.8595,
"step": 2700
},
{
"epoch": 0.46995636119503187,
"grad_norm": 63243.34375,
"learning_rate": 2.6502181940248405e-05,
"loss": 0.8359,
"step": 2800
},
{
"epoch": 0.4867405169519973,
"grad_norm": 69866.1015625,
"learning_rate": 2.5662974152400137e-05,
"loss": 0.8351,
"step": 2900
},
{
"epoch": 0.5035246727089627,
"grad_norm": 70137.2109375,
"learning_rate": 2.4823766364551865e-05,
"loss": 0.8191,
"step": 3000
},
{
"epoch": 0.5203088284659282,
"grad_norm": 75627.3203125,
"learning_rate": 2.398455857670359e-05,
"loss": 0.8473,
"step": 3100
},
{
"epoch": 0.5370929842228936,
"grad_norm": 66008.0,
"learning_rate": 2.3145350788855323e-05,
"loss": 0.8263,
"step": 3200
},
{
"epoch": 0.553877139979859,
"grad_norm": 65289.640625,
"learning_rate": 2.2306143001007052e-05,
"loss": 0.8386,
"step": 3300
},
{
"epoch": 0.5706612957368244,
"grad_norm": 73041.9921875,
"learning_rate": 2.1466935213158777e-05,
"loss": 0.8287,
"step": 3400
},
{
"epoch": 0.5874454514937899,
"grad_norm": 66999.875,
"learning_rate": 2.062772742531051e-05,
"loss": 0.8203,
"step": 3500
},
{
"epoch": 0.6042296072507553,
"grad_norm": 68238.859375,
"learning_rate": 1.9788519637462235e-05,
"loss": 0.831,
"step": 3600
},
{
"epoch": 0.6210137630077207,
"grad_norm": 75512.6796875,
"learning_rate": 1.8949311849613967e-05,
"loss": 0.8046,
"step": 3700
},
{
"epoch": 0.6377979187646862,
"grad_norm": 69572.578125,
"learning_rate": 1.8110104061765696e-05,
"loss": 0.8075,
"step": 3800
},
{
"epoch": 0.6545820745216515,
"grad_norm": 70714.1953125,
"learning_rate": 1.727089627391742e-05,
"loss": 0.8386,
"step": 3900
},
{
"epoch": 0.671366230278617,
"grad_norm": 70761.921875,
"learning_rate": 1.6431688486069153e-05,
"loss": 0.7866,
"step": 4000
},
{
"epoch": 0.6881503860355824,
"grad_norm": 69547.9375,
"learning_rate": 1.559248069822088e-05,
"loss": 0.8212,
"step": 4100
},
{
"epoch": 0.7049345417925479,
"grad_norm": 69571.3046875,
"learning_rate": 1.4753272910372609e-05,
"loss": 0.7977,
"step": 4200
},
{
"epoch": 0.7217186975495132,
"grad_norm": 64268.54296875,
"learning_rate": 1.391406512252434e-05,
"loss": 0.7886,
"step": 4300
},
{
"epoch": 0.7385028533064787,
"grad_norm": 70546.71875,
"learning_rate": 1.3074857334676067e-05,
"loss": 0.7956,
"step": 4400
},
{
"epoch": 0.7552870090634441,
"grad_norm": 67713.59375,
"learning_rate": 1.2235649546827795e-05,
"loss": 0.7939,
"step": 4500
},
{
"epoch": 0.7720711648204095,
"grad_norm": 69085.0703125,
"learning_rate": 1.1396441758979524e-05,
"loss": 0.7857,
"step": 4600
},
{
"epoch": 0.788855320577375,
"grad_norm": 76299.1015625,
"learning_rate": 1.0557233971131253e-05,
"loss": 0.7889,
"step": 4700
},
{
"epoch": 0.8056394763343404,
"grad_norm": 84437.421875,
"learning_rate": 9.718026183282982e-06,
"loss": 0.7926,
"step": 4800
},
{
"epoch": 0.8224236320913058,
"grad_norm": 65666.4375,
"learning_rate": 8.87881839543471e-06,
"loss": 0.7792,
"step": 4900
},
{
"epoch": 0.8392077878482712,
"grad_norm": 66860.4765625,
"learning_rate": 8.039610607586439e-06,
"loss": 0.764,
"step": 5000
},
{
"epoch": 0.8559919436052367,
"grad_norm": 75992.640625,
"learning_rate": 7.200402819738168e-06,
"loss": 0.7925,
"step": 5100
},
{
"epoch": 0.8727760993622021,
"grad_norm": 70527.9765625,
"learning_rate": 6.361195031889897e-06,
"loss": 0.7864,
"step": 5200
},
{
"epoch": 0.8895602551191675,
"grad_norm": 74602.359375,
"learning_rate": 5.5219872440416254e-06,
"loss": 0.7991,
"step": 5300
},
{
"epoch": 0.9063444108761329,
"grad_norm": 64026.41796875,
"learning_rate": 4.682779456193353e-06,
"loss": 0.7762,
"step": 5400
},
{
"epoch": 0.9231285666330984,
"grad_norm": 62320.39453125,
"learning_rate": 3.843571668345083e-06,
"loss": 0.789,
"step": 5500
},
{
"epoch": 0.9399127223900637,
"grad_norm": 59294.8984375,
"learning_rate": 3.0043638804968113e-06,
"loss": 0.7888,
"step": 5600
},
{
"epoch": 0.9566968781470292,
"grad_norm": 67846.1953125,
"learning_rate": 2.16515609264854e-06,
"loss": 0.7874,
"step": 5700
},
{
"epoch": 0.9734810339039947,
"grad_norm": 67274.59375,
"learning_rate": 1.3259483048002687e-06,
"loss": 0.7928,
"step": 5800
},
{
"epoch": 0.99026518966096,
"grad_norm": 72139.2734375,
"learning_rate": 4.867405169519974e-07,
"loss": 0.7832,
"step": 5900
},
{
"epoch": 1.0,
"step": 5958,
"total_flos": 9.08850744042455e+17,
"train_loss": 0.8458019172076853,
"train_runtime": 81918.9773,
"train_samples_per_second": 0.873,
"train_steps_per_second": 0.073
}
],
"logging_steps": 100,
"max_steps": 5958,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5958,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.08850744042455e+17,
"train_batch_size": 12,
"trial_name": null,
"trial_params": null
}