{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 17,
"global_step": 84,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011904761904761904,
"grad_norm": 0.26561033725738525,
"learning_rate": 6.666666666666667e-05,
"loss": 2.2541,
"step": 1
},
{
"epoch": 0.023809523809523808,
"grad_norm": 0.2501536011695862,
"learning_rate": 0.00013333333333333334,
"loss": 2.2996,
"step": 2
},
{
"epoch": 0.03571428571428571,
"grad_norm": 0.23626406490802765,
"learning_rate": 0.0002,
"loss": 2.2516,
"step": 3
},
{
"epoch": 0.047619047619047616,
"grad_norm": 0.2351306676864624,
"learning_rate": 0.00019992479525042303,
"loss": 2.0972,
"step": 4
},
{
"epoch": 0.05952380952380952,
"grad_norm": 0.29416894912719727,
"learning_rate": 0.0001996992941167792,
"loss": 1.9448,
"step": 5
},
{
"epoch": 0.07142857142857142,
"grad_norm": 0.4979971945285797,
"learning_rate": 0.00019932383577419432,
"loss": 1.8433,
"step": 6
},
{
"epoch": 0.08333333333333333,
"grad_norm": 0.3428308963775635,
"learning_rate": 0.00019879898494768093,
"loss": 1.7351,
"step": 7
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.46657490730285645,
"learning_rate": 0.00019812553106273847,
"loss": 1.5867,
"step": 8
},
{
"epoch": 0.10714285714285714,
"grad_norm": 0.4063335955142975,
"learning_rate": 0.00019730448705798239,
"loss": 1.3473,
"step": 9
},
{
"epoch": 0.11904761904761904,
"grad_norm": 0.5489273071289062,
"learning_rate": 0.00019633708786158806,
"loss": 1.3747,
"step": 10
},
{
"epoch": 0.13095238095238096,
"grad_norm": 0.3660918176174164,
"learning_rate": 0.00019522478853384155,
"loss": 1.2575,
"step": 11
},
{
"epoch": 0.14285714285714285,
"grad_norm": 0.28144609928131104,
"learning_rate": 0.00019396926207859084,
"loss": 1.2127,
"step": 12
},
{
"epoch": 0.15476190476190477,
"grad_norm": 0.299657940864563,
"learning_rate": 0.00019257239692688907,
"loss": 1.2426,
"step": 13
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.18204455077648163,
"learning_rate": 0.0001910362940966147,
"loss": 1.1797,
"step": 14
},
{
"epoch": 0.17857142857142858,
"grad_norm": 0.19023093581199646,
"learning_rate": 0.00018936326403234125,
"loss": 1.1511,
"step": 15
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.12908512353897095,
"learning_rate": 0.0001875558231302091,
"loss": 1.0714,
"step": 16
},
{
"epoch": 0.20238095238095238,
"grad_norm": 0.10724656283855438,
"learning_rate": 0.00018561668995302667,
"loss": 1.1114,
"step": 17
},
{
"epoch": 0.20238095238095238,
"eval_loss": 1.1259114742279053,
"eval_runtime": 248.1643,
"eval_samples_per_second": 2.708,
"eval_steps_per_second": 0.338,
"step": 17
},
{
"epoch": 0.21428571428571427,
"grad_norm": 0.10084182024002075,
"learning_rate": 0.00018354878114129367,
"loss": 1.2143,
"step": 18
},
{
"epoch": 0.2261904761904762,
"grad_norm": 0.14486701786518097,
"learning_rate": 0.00018135520702629675,
"loss": 1.127,
"step": 19
},
{
"epoch": 0.23809523809523808,
"grad_norm": 0.08842574805021286,
"learning_rate": 0.00017903926695187595,
"loss": 1.1359,
"step": 20
},
{
"epoch": 0.25,
"grad_norm": 0.08982816338539124,
"learning_rate": 0.0001766044443118978,
"loss": 1.136,
"step": 21
},
{
"epoch": 0.2619047619047619,
"grad_norm": 0.08831178396940231,
"learning_rate": 0.00017405440131090048,
"loss": 1.104,
"step": 22
},
{
"epoch": 0.27380952380952384,
"grad_norm": 0.09078623354434967,
"learning_rate": 0.00017139297345578994,
"loss": 1.1852,
"step": 23
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.09844227880239487,
"learning_rate": 0.0001686241637868734,
"loss": 1.1274,
"step": 24
},
{
"epoch": 0.2976190476190476,
"grad_norm": 0.09135384112596512,
"learning_rate": 0.0001657521368569064,
"loss": 1.1203,
"step": 25
},
{
"epoch": 0.30952380952380953,
"grad_norm": 0.09475894272327423,
"learning_rate": 0.00016278121246720987,
"loss": 1.0698,
"step": 26
},
{
"epoch": 0.32142857142857145,
"grad_norm": 0.09128192812204361,
"learning_rate": 0.00015971585917027862,
"loss": 1.1008,
"step": 27
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.1022096574306488,
"learning_rate": 0.00015656068754865387,
"loss": 1.1174,
"step": 28
},
{
"epoch": 0.34523809523809523,
"grad_norm": 0.09190971404314041,
"learning_rate": 0.00015332044328016914,
"loss": 1.077,
"step": 29
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.09310519695281982,
"learning_rate": 0.00015000000000000001,
"loss": 1.1045,
"step": 30
},
{
"epoch": 0.36904761904761907,
"grad_norm": 0.09474918991327286,
"learning_rate": 0.0001466043519702539,
"loss": 1.1492,
"step": 31
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.08711487054824829,
"learning_rate": 0.00014313860656812536,
"loss": 1.0322,
"step": 32
},
{
"epoch": 0.39285714285714285,
"grad_norm": 0.09338130801916122,
"learning_rate": 0.0001396079766039157,
"loss": 1.1269,
"step": 33
},
{
"epoch": 0.40476190476190477,
"grad_norm": 0.08810640871524811,
"learning_rate": 0.00013601777248047105,
"loss": 1.1308,
"step": 34
},
{
"epoch": 0.40476190476190477,
"eval_loss": 1.067408561706543,
"eval_runtime": 248.1784,
"eval_samples_per_second": 2.708,
"eval_steps_per_second": 0.338,
"step": 34
},
{
"epoch": 0.4166666666666667,
"grad_norm": 0.10047192871570587,
"learning_rate": 0.00013237339420583212,
"loss": 1.1319,
"step": 35
},
{
"epoch": 0.42857142857142855,
"grad_norm": 0.10280368477106094,
"learning_rate": 0.00012868032327110904,
"loss": 1.1145,
"step": 36
},
{
"epoch": 0.44047619047619047,
"grad_norm": 0.0960673838853836,
"learning_rate": 0.00012494411440579814,
"loss": 1.0966,
"step": 37
},
{
"epoch": 0.4523809523809524,
"grad_norm": 0.10409148037433624,
"learning_rate": 0.0001211703872229411,
"loss": 1.0936,
"step": 38
},
{
"epoch": 0.4642857142857143,
"grad_norm": 0.09941337257623672,
"learning_rate": 0.00011736481776669306,
"loss": 1.109,
"step": 39
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.09119969606399536,
"learning_rate": 0.00011353312997501313,
"loss": 0.9867,
"step": 40
},
{
"epoch": 0.4880952380952381,
"grad_norm": 0.09008549153804779,
"learning_rate": 0.00010968108707031792,
"loss": 1.0514,
"step": 41
},
{
"epoch": 0.5,
"grad_norm": 0.09608256071805954,
"learning_rate": 0.00010581448289104758,
"loss": 1.05,
"step": 42
},
{
"epoch": 0.5119047619047619,
"grad_norm": 0.09066122770309448,
"learning_rate": 0.00010193913317718244,
"loss": 1.1273,
"step": 43
},
{
"epoch": 0.5238095238095238,
"grad_norm": 0.09085338562726974,
"learning_rate": 9.806086682281758e-05,
"loss": 1.0426,
"step": 44
},
{
"epoch": 0.5357142857142857,
"grad_norm": 0.08922470360994339,
"learning_rate": 9.418551710895243e-05,
"loss": 1.0485,
"step": 45
},
{
"epoch": 0.5476190476190477,
"grad_norm": 0.09132881462574005,
"learning_rate": 9.03189129296821e-05,
"loss": 1.0966,
"step": 46
},
{
"epoch": 0.5595238095238095,
"grad_norm": 0.09102962166070938,
"learning_rate": 8.646687002498692e-05,
"loss": 1.0555,
"step": 47
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.09371238946914673,
"learning_rate": 8.263518223330697e-05,
"loss": 1.0693,
"step": 48
},
{
"epoch": 0.5833333333333334,
"grad_norm": 0.08540762215852737,
"learning_rate": 7.882961277705895e-05,
"loss": 0.9991,
"step": 49
},
{
"epoch": 0.5952380952380952,
"grad_norm": 0.08648107200860977,
"learning_rate": 7.505588559420189e-05,
"loss": 1.0399,
"step": 50
},
{
"epoch": 0.6071428571428571,
"grad_norm": 0.08525101095438004,
"learning_rate": 7.131967672889101e-05,
"loss": 1.0251,
"step": 51
},
{
"epoch": 0.6071428571428571,
"eval_loss": 1.0418401956558228,
"eval_runtime": 248.0851,
"eval_samples_per_second": 2.709,
"eval_steps_per_second": 0.339,
"step": 51
},
{
"epoch": 0.6190476190476191,
"grad_norm": 0.09358056634664536,
"learning_rate": 6.762660579416791e-05,
"loss": 1.151,
"step": 52
},
{
"epoch": 0.6309523809523809,
"grad_norm": 0.09469255059957504,
"learning_rate": 6.398222751952899e-05,
"loss": 1.0479,
"step": 53
},
{
"epoch": 0.6428571428571429,
"grad_norm": 0.09118176251649857,
"learning_rate": 6.039202339608432e-05,
"loss": 1.0467,
"step": 54
},
{
"epoch": 0.6547619047619048,
"grad_norm": 0.0902942344546318,
"learning_rate": 5.6861393431874675e-05,
"loss": 0.9988,
"step": 55
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.09111825376749039,
"learning_rate": 5.339564802974615e-05,
"loss": 1.0067,
"step": 56
},
{
"epoch": 0.6785714285714286,
"grad_norm": 0.1021014079451561,
"learning_rate": 5.000000000000002e-05,
"loss": 1.0492,
"step": 57
},
{
"epoch": 0.6904761904761905,
"grad_norm": 0.09211082756519318,
"learning_rate": 4.66795567198309e-05,
"loss": 1.0085,
"step": 58
},
{
"epoch": 0.7023809523809523,
"grad_norm": 0.09428705275058746,
"learning_rate": 4.343931245134616e-05,
"loss": 1.0831,
"step": 59
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.09291902184486389,
"learning_rate": 4.028414082972141e-05,
"loss": 1.1003,
"step": 60
},
{
"epoch": 0.7261904761904762,
"grad_norm": 0.08907562494277954,
"learning_rate": 3.721878753279017e-05,
"loss": 1.0676,
"step": 61
},
{
"epoch": 0.7380952380952381,
"grad_norm": 0.09440898895263672,
"learning_rate": 3.424786314309365e-05,
"loss": 1.0129,
"step": 62
},
{
"epoch": 0.75,
"grad_norm": 0.09342656284570694,
"learning_rate": 3.137583621312665e-05,
"loss": 1.0515,
"step": 63
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.09086596220731735,
"learning_rate": 2.8607026544210114e-05,
"loss": 1.0601,
"step": 64
},
{
"epoch": 0.7738095238095238,
"grad_norm": 0.09026821702718735,
"learning_rate": 2.594559868909956e-05,
"loss": 1.0162,
"step": 65
},
{
"epoch": 0.7857142857142857,
"grad_norm": 0.09036453813314438,
"learning_rate": 2.339555568810221e-05,
"loss": 1.0327,
"step": 66
},
{
"epoch": 0.7976190476190477,
"grad_norm": 0.08887065201997757,
"learning_rate": 2.0960733048124083e-05,
"loss": 1.0744,
"step": 67
},
{
"epoch": 0.8095238095238095,
"grad_norm": 0.08880475908517838,
"learning_rate": 1.864479297370325e-05,
"loss": 1.0612,
"step": 68
},
{
"epoch": 0.8095238095238095,
"eval_loss": 1.031933069229126,
"eval_runtime": 248.1305,
"eval_samples_per_second": 2.708,
"eval_steps_per_second": 0.339,
"step": 68
},
{
"epoch": 0.8214285714285714,
"grad_norm": 0.0960259959101677,
"learning_rate": 1.6451218858706374e-05,
"loss": 1.0564,
"step": 69
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.09149077534675598,
"learning_rate": 1.4383310046973365e-05,
"loss": 1.0129,
"step": 70
},
{
"epoch": 0.8452380952380952,
"grad_norm": 0.08991867303848267,
"learning_rate": 1.2444176869790925e-05,
"loss": 1.0187,
"step": 71
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.09646643698215485,
"learning_rate": 1.0636735967658784e-05,
"loss": 1.0561,
"step": 72
},
{
"epoch": 0.8690476190476191,
"grad_norm": 0.09574442356824875,
"learning_rate": 8.963705903385345e-06,
"loss": 1.0474,
"step": 73
},
{
"epoch": 0.8809523809523809,
"grad_norm": 0.0916275605559349,
"learning_rate": 7.427603073110967e-06,
"loss": 1.0217,
"step": 74
},
{
"epoch": 0.8928571428571429,
"grad_norm": 0.099373459815979,
"learning_rate": 6.030737921409169e-06,
"loss": 1.0831,
"step": 75
},
{
"epoch": 0.9047619047619048,
"grad_norm": 0.0911068245768547,
"learning_rate": 4.775211466158469e-06,
"loss": 1.0488,
"step": 76
},
{
"epoch": 0.9166666666666666,
"grad_norm": 0.0971531942486763,
"learning_rate": 3.662912138411967e-06,
"loss": 1.0365,
"step": 77
},
{
"epoch": 0.9285714285714286,
"grad_norm": 0.0948888435959816,
"learning_rate": 2.6955129420176196e-06,
"loss": 1.0705,
"step": 78
},
{
"epoch": 0.9404761904761905,
"grad_norm": 0.09453336149454117,
"learning_rate": 1.874468937261531e-06,
"loss": 1.0429,
"step": 79
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.10001447796821594,
"learning_rate": 1.201015052319099e-06,
"loss": 0.985,
"step": 80
},
{
"epoch": 0.9642857142857143,
"grad_norm": 0.09593307226896286,
"learning_rate": 6.761642258056978e-07,
"loss": 1.0146,
"step": 81
},
{
"epoch": 0.9761904761904762,
"grad_norm": 0.08949106186628342,
"learning_rate": 3.007058832207976e-07,
"loss": 1.0151,
"step": 82
},
{
"epoch": 0.9880952380952381,
"grad_norm": 0.09485086053609848,
"learning_rate": 7.520474957699586e-08,
"loss": 1.0602,
"step": 83
},
{
"epoch": 1.0,
"grad_norm": 0.0924575999379158,
"learning_rate": 0.0,
"loss": 1.0294,
"step": 84
}
],
"logging_steps": 1,
"max_steps": 84,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.334928379474739e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}