besimray committed
Commit 3267027
1 Parent(s): d53f114

Training in progress, step 160, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69b47bdf9d9dbb19daa258c7cfd7d4a464d9687724469943937b1352b2831b75
+ oid sha256:8425d507f0f36ee1eef998c15601287109913addc9b4d411f74a29a0e727069f
  size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f606363e00340c98ba428dd4f93d560320af0ff1c69f0c2cd00396f45ea7f0db
+ oid sha256:e437285ca548e6751dac63fe81fa1d7d7c5390e739df7e4059dbd16f1e1d20af
  size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:320193f57a9c9ab92eeb8e7b288aa137ef49a372e28016d95c7f7930d933ca5d
+ oid sha256:3583a844a2ebe7f88f14edfae8eae81e5f1252d9e214fd1adf0c1db5eb1b93a5
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8fa6a7a6709edeb55cdf23229934c07be2d8aae0e4056fbdb6ff2482d0eb3d3
+ oid sha256:ac2f052986474c47aa92ee09f81c986cf5cd3c01bde827e5d887e85b2bbda4c2
  size 1064
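Each of the four files above is tracked with Git LFS, so the commit only changes the three-line pointer (spec version, `oid sha256:` digest, byte size); the checkpoint blobs themselves live in LFS storage. A small sketch of how such a pointer could be checked against a downloaded blob is below; the function name and file paths are illustrative and not part of this repository.

```python
# Sketch (illustrative, not part of this repo): validate a downloaded blob
# against a Git LFS pointer file of the form shown in the diffs above:
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Return True if the blob's size and SHA-256 digest match the LFS pointer."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().strip().splitlines()
    )
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<digest>" -> "<digest>"
    expected_size = int(fields["size"])            # e.g. 90207248 for adapter_model.safetensors
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Example call (hypothetical paths):
# verify_lfs_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```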
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 1.5968632698059082,
  "best_model_checkpoint": "miner_id_24/checkpoint-130",
- "epoch": 0.04235792446170138,
+ "epoch": 0.04518178609248147,
  "eval_steps": 10,
- "global_step": 150,
+ "global_step": 160,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1185,6 +1185,84 @@
  "eval_samples_per_second": 5.593,
  "eval_steps_per_second": 5.593,
  "step": 150
+ },
+ {
+ "epoch": 0.04264031062477939,
+ "grad_norm": 1.4214565753936768,
+ "learning_rate": 0.00016184643766056317,
+ "loss": 1.9267,
+ "step": 151
+ },
+ {
+ "epoch": 0.042922696787857394,
+ "grad_norm": 1.2661665678024292,
+ "learning_rate": 0.00016134135287043669,
+ "loss": 1.8779,
+ "step": 152
+ },
+ {
+ "epoch": 0.043205082950935406,
+ "grad_norm": 2.253584384918213,
+ "learning_rate": 0.00016083374657755134,
+ "loss": 1.2,
+ "step": 153
+ },
+ {
+ "epoch": 0.04348746911401341,
+ "grad_norm": 2.3451638221740723,
+ "learning_rate": 0.00016032363964761363,
+ "loss": 2.1057,
+ "step": 154
+ },
+ {
+ "epoch": 0.04376985527709142,
+ "grad_norm": 2.77101731300354,
+ "learning_rate": 0.00015981105304912162,
+ "loss": 1.8791,
+ "step": 155
+ },
+ {
+ "epoch": 0.044052241440169435,
+ "grad_norm": 1.678722620010376,
+ "learning_rate": 0.00015929600785250257,
+ "loss": 2.4479,
+ "step": 156
+ },
+ {
+ "epoch": 0.04433462760324744,
+ "grad_norm": 1.2198508977890015,
+ "learning_rate": 0.00015877852522924732,
+ "loss": 2.3366,
+ "step": 157
+ },
+ {
+ "epoch": 0.04461701376632545,
+ "grad_norm": 5.628009796142578,
+ "learning_rate": 0.0001582586264510396,
+ "loss": 1.3177,
+ "step": 158
+ },
+ {
+ "epoch": 0.04489939992940346,
+ "grad_norm": 2.065458297729492,
+ "learning_rate": 0.00015773633288888197,
+ "loss": 2.0971,
+ "step": 159
+ },
+ {
+ "epoch": 0.04518178609248147,
+ "grad_norm": 0.9564564824104309,
+ "learning_rate": 0.00015721166601221698,
+ "loss": 1.3886,
+ "step": 160
+ },
+ {
+ "epoch": 0.04518178609248147,
+ "eval_loss": 1.6064260005950928,
+ "eval_runtime": 133.4242,
+ "eval_samples_per_second": 5.591,
+ "eval_steps_per_second": 5.591,
+ "step": 160
  }
  ],
  "logging_steps": 1,
@@ -1199,7 +1277,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 2
+ "early_stopping_patience_counter": 3
  }
  },
  "TrainerControl": {
@@ -1208,12 +1286,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 1.46820537778176e+16,
+ "total_flos": 1.566085736300544e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null