mymodel / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9157088122605364,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.038314176245210725,
"grad_norm": 0.3217230917151008,
"learning_rate": 1.8867924528301888e-05,
"loss": 2.4318,
"step": 10
},
{
"epoch": 0.07662835249042145,
"grad_norm": 0.40615181133127565,
"learning_rate": 3.7735849056603776e-05,
"loss": 2.4277,
"step": 20
},
{
"epoch": 0.11494252873563218,
"grad_norm": 0.6561447102516671,
"learning_rate": 5.660377358490566e-05,
"loss": 2.433,
"step": 30
},
{
"epoch": 0.1532567049808429,
"grad_norm": 0.47475244248065424,
"learning_rate": 7.547169811320755e-05,
"loss": 2.4355,
"step": 40
},
{
"epoch": 0.19157088122605365,
"grad_norm": 0.7543768354889395,
"learning_rate": 9.433962264150944e-05,
"loss": 2.4467,
"step": 50
},
{
"epoch": 0.22988505747126436,
"grad_norm": 0.45899820520400003,
"learning_rate": 9.994504457428558e-05,
"loss": 2.4393,
"step": 60
},
{
"epoch": 0.2681992337164751,
"grad_norm": 0.4823946996990999,
"learning_rate": 9.967616591677905e-05,
"loss": 2.4541,
"step": 70
},
{
"epoch": 0.3065134099616858,
"grad_norm": 0.3393294030578001,
"learning_rate": 9.918447466584544e-05,
"loss": 2.4523,
"step": 80
},
{
"epoch": 0.3448275862068966,
"grad_norm": 0.5651147369232912,
"learning_rate": 9.847217620510815e-05,
"loss": 2.4576,
"step": 90
},
{
"epoch": 0.3831417624521073,
"grad_norm": 0.34879332121566425,
"learning_rate": 9.754246540809257e-05,
"loss": 2.4504,
"step": 100
},
{
"epoch": 0.421455938697318,
"grad_norm": 0.5594959779731135,
"learning_rate": 9.639951230825433e-05,
"loss": 2.466,
"step": 110
},
{
"epoch": 0.45977011494252873,
"grad_norm": 0.274770129693475,
"learning_rate": 9.504844339512095e-05,
"loss": 2.4469,
"step": 120
},
{
"epoch": 0.49808429118773945,
"grad_norm": 0.30708437591742765,
"learning_rate": 9.349531862043952e-05,
"loss": 2.4521,
"step": 130
},
{
"epoch": 0.5363984674329502,
"grad_norm": 0.28940091280377206,
"learning_rate": 9.174710421746444e-05,
"loss": 2.4596,
"step": 140
},
{
"epoch": 0.5747126436781609,
"grad_norm": 0.33901258469233675,
"learning_rate": 8.981164145529944e-05,
"loss": 2.4572,
"step": 150
},
{
"epoch": 0.6130268199233716,
"grad_norm": 0.29452774712566837,
"learning_rate": 8.769761146843949e-05,
"loss": 2.4545,
"step": 160
},
{
"epoch": 0.6513409961685823,
"grad_norm": 0.28040855801882,
"learning_rate": 8.541449631926325e-05,
"loss": 2.4562,
"step": 170
},
{
"epoch": 0.6896551724137931,
"grad_norm": 0.26567815293624064,
"learning_rate": 8.297253646812213e-05,
"loss": 2.4482,
"step": 180
},
{
"epoch": 0.7279693486590039,
"grad_norm": 0.6149158857790219,
"learning_rate": 8.038268484178565e-05,
"loss": 2.4553,
"step": 190
},
{
"epoch": 0.7662835249042146,
"grad_norm": 0.27934075531158126,
"learning_rate": 7.765655770625997e-05,
"loss": 2.4463,
"step": 200
},
{
"epoch": 0.8045977011494253,
"grad_norm": 0.26860998367945094,
"learning_rate": 7.480638256432977e-05,
"loss": 2.4514,
"step": 210
},
{
"epoch": 0.842911877394636,
"grad_norm": 0.34035821167200137,
"learning_rate": 7.184494331151866e-05,
"loss": 2.449,
"step": 220
},
{
"epoch": 0.8812260536398467,
"grad_norm": 0.262160342617283,
"learning_rate": 6.878552289646041e-05,
"loss": 2.4381,
"step": 230
},
{
"epoch": 0.9195402298850575,
"grad_norm": 0.29975838142553335,
"learning_rate": 6.564184374286636e-05,
"loss": 2.4477,
"step": 240
},
{
"epoch": 0.9578544061302682,
"grad_norm": 0.3038130770045491,
"learning_rate": 6.242800620031434e-05,
"loss": 2.4393,
"step": 250
},
{
"epoch": 0.9961685823754789,
"grad_norm": 0.23643504791544426,
"learning_rate": 5.915842529992631e-05,
"loss": 2.4285,
"step": 260
},
{
"epoch": 1.0344827586206897,
"grad_norm": 0.4638398552745887,
"learning_rate": 5.584776609860414e-05,
"loss": 2.1607,
"step": 270
},
{
"epoch": 1.0727969348659003,
"grad_norm": 0.3369549986726721,
"learning_rate": 5.2510877901824286e-05,
"loss": 2.1133,
"step": 280
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.2891769614162093,
"learning_rate": 4.916272766002227e-05,
"loss": 2.1084,
"step": 290
},
{
"epoch": 1.1494252873563218,
"grad_norm": 0.25599077000320447,
"learning_rate": 4.5818332837303665e-05,
"loss": 2.1109,
"step": 300
},
{
"epoch": 1.1877394636015326,
"grad_norm": 0.25505449141811376,
"learning_rate": 4.249269405358634e-05,
"loss": 2.1141,
"step": 310
},
{
"epoch": 1.2260536398467432,
"grad_norm": 0.24717989839985094,
"learning_rate": 3.920072780229378e-05,
"loss": 2.1141,
"step": 320
},
{
"epoch": 1.264367816091954,
"grad_norm": 0.24533136645961962,
"learning_rate": 3.595719954538122e-05,
"loss": 2.1111,
"step": 330
},
{
"epoch": 1.3026819923371646,
"grad_norm": 0.2405802075099404,
"learning_rate": 3.277665748578336e-05,
"loss": 2.1086,
"step": 340
},
{
"epoch": 1.3409961685823755,
"grad_norm": 0.24564169644979666,
"learning_rate": 2.9673367314334532e-05,
"loss": 2.1076,
"step": 350
},
{
"epoch": 1.3793103448275863,
"grad_norm": 0.23899109047951717,
"learning_rate": 2.6661248223840707e-05,
"loss": 2.1092,
"step": 360
},
{
"epoch": 1.417624521072797,
"grad_norm": 0.24093321299505463,
"learning_rate": 2.37538104772998e-05,
"loss": 2.1092,
"step": 370
},
{
"epoch": 1.4559386973180077,
"grad_norm": 0.2353684891980503,
"learning_rate": 2.0964094810295558e-05,
"loss": 2.1158,
"step": 380
},
{
"epoch": 1.4942528735632183,
"grad_norm": 0.2338179136134976,
"learning_rate": 1.830461393936353e-05,
"loss": 2.1139,
"step": 390
},
{
"epoch": 1.5325670498084292,
"grad_norm": 0.2338961312820727,
"learning_rate": 1.578729643868181e-05,
"loss": 2.1143,
"step": 400
},
{
"epoch": 1.5708812260536398,
"grad_norm": 0.2296079266331823,
"learning_rate": 1.3423433236816563e-05,
"loss": 2.1117,
"step": 410
},
{
"epoch": 1.6091954022988506,
"grad_norm": 0.2318348232005023,
"learning_rate": 1.1223626973500396e-05,
"loss": 2.1109,
"step": 420
},
{
"epoch": 1.6475095785440614,
"grad_norm": 0.22777895966815453,
"learning_rate": 9.197744443594003e-06,
"loss": 2.1023,
"step": 430
},
{
"epoch": 1.685823754789272,
"grad_norm": 0.2294544601103437,
"learning_rate": 7.3548723415340196e-06,
"loss": 2.1046,
"step": 440
},
{
"epoch": 1.7241379310344827,
"grad_norm": 0.22902930560321783,
"learning_rate": 5.7032765047665136e-06,
"loss": 2.1181,
"step": 450
},
{
"epoch": 1.7624521072796935,
"grad_norm": 0.2266817684502059,
"learning_rate": 4.250364838972065e-06,
"loss": 2.1089,
"step": 460
},
{
"epoch": 1.8007662835249043,
"grad_norm": 0.2258576905472298,
"learning_rate": 3.0026540913734522e-06,
"loss": 2.1077,
"step": 470
},
{
"epoch": 1.839080459770115,
"grad_norm": 0.2254304711001865,
"learning_rate": 1.9657406211579966e-06,
"loss": 2.1057,
"step": 480
},
{
"epoch": 1.8773946360153255,
"grad_norm": 0.21992296869073144,
"learning_rate": 1.1442752981179527e-06,
"loss": 2.1031,
"step": 490
},
{
"epoch": 1.9157088122605364,
"grad_norm": 0.21929533112603253,
"learning_rate": 5.41942642096177e-07,
"loss": 2.1109,
"step": 500
},
{
"epoch": 1.9157088122605364,
"eval_loss": 2.403846263885498,
"eval_runtime": 0.7812,
"eval_samples_per_second": 16.641,
"eval_steps_per_second": 1.28,
"step": 500
}
],
"logging_steps": 10,
"max_steps": 522,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1944814878720000.0,
"train_batch_size": 120,
"trial_name": null,
"trial_params": null
}
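
Note: the JSON above appears to be the trainer_state.json checkpoint state written by the Hugging Face transformers Trainer during fine-tuning (522 optimizer steps over 2 epochs, logging every 10 steps, with a single evaluation at step 500). Below is a minimal sketch of how one might load this file and summarise its log_history; the local path "trainer_state.json" and the printed summary are illustrative assumptions, not part of the upload.

# Minimal sketch (not part of the checkpoint): load the state file above
# and summarise its log_history. The local path is assumed.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; the single evaluation entry carries "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print("logged training steps:", len(train_logs))       # 50 entries (every 10 steps up to 500)
print("final training loss:", train_logs[-1]["loss"])  # 2.1109 at step 500
print("eval loss:", eval_logs[-1]["eval_loss"])         # 2.403846... at step 500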