Training in progress, step 75, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2f914ea3dc68224bd55d7107b08f59ed23bf5bf397814ee9b20fe839f6a26204
 size 536906096
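adapter_model.safetensors is the file name PEFT writes for saved adapter (e.g. LoRA) weights, which suggests this checkpoint stores adapter parameters rather than full model weights. A minimal sketch of loading it for inference, assuming a causal-LM base model whose id is a placeholder here:

```python
# Hedged sketch: load the adapter from this checkpoint on top of its base model.
# "base-model-id" is a placeholder; the actual base model is not recorded in this diff.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("base-model-id")
model = PeftModel.from_pretrained(base, "last-checkpoint")  # reads adapter_model.safetensors
model.eval()
```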
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4cf21319f12883ef19a21dd10c8f43a0a77836b0ede2357a2fb6f15dc87066b3
 size 269267284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d5241e37cffc4e4b4f74447637f4dfcba8fa80c5a3822bf57d56e725a1724e3b
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:aee2b620608ef64c9c8f70ed72f3a0e1bf233746a6ec27ad47abebf797bd2580
 size 1064
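The binary files above are stored through Git LFS, so each diff only touches the pointer file: a spec version line, the SHA-256 of the stored object (oid), and its size in bytes. A short sketch of checking a downloaded file against its pointer, using the new scheduler.pt values from above (the local path is an assumption):

```python
# Hedged sketch: verify a downloaded checkpoint file against its Git LFS pointer.
# The oid and size are copied from the scheduler.pt pointer in this commit.
import hashlib
from pathlib import Path

path = Path("last-checkpoint/scheduler.pt")  # assumed local download location
expected_oid = "aee2b620608ef64c9c8f70ed72f3a0e1bf233746a6ec27ad47abebf797bd2580"
expected_size = 1064

data = path.read_bytes()
assert len(data) == expected_size, "size does not match the LFS pointer"
assert hashlib.sha256(data).hexdigest() == expected_oid, "oid does not match the LFS pointer"
print("scheduler.pt matches its Git LFS pointer")
```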
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.
+  "epoch": 0.3,
   "eval_steps": 500,
-  "global_step":
+  "global_step": 75,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -307,13 +307,163 @@
       "learning_rate": 0.0002,
       "loss": 0.3281,
       "step": 50
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 0.0002,
+      "loss": 0.6814,
+      "step": 51
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.6904,
+      "step": 52
+    },
+    {
+      "epoch": 0.21,
+      "learning_rate": 0.0002,
+      "loss": 0.6387,
+      "step": 53
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.6075,
+      "step": 54
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.5507,
+      "step": 55
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.0002,
+      "loss": 0.5008,
+      "step": 56
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.5305,
+      "step": 57
+    },
+    {
+      "epoch": 0.23,
+      "learning_rate": 0.0002,
+      "loss": 0.4997,
+      "step": 58
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.4824,
+      "step": 59
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.5156,
+      "step": 60
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 0.0002,
+      "loss": 0.4761,
+      "step": 61
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.5306,
+      "step": 62
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 0.0002,
+      "loss": 0.4929,
+      "step": 63
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.4895,
+      "step": 64
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.5044,
+      "step": 65
+    },
+    {
+      "epoch": 0.26,
+      "learning_rate": 0.0002,
+      "loss": 0.5326,
+      "step": 66
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.5032,
+      "step": 67
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.4838,
+      "step": 68
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.4525,
+      "step": 69
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.4486,
+      "step": 70
+    },
+    {
+      "epoch": 0.28,
+      "learning_rate": 0.0002,
+      "loss": 0.4566,
+      "step": 71
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.4455,
+      "step": 72
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 0.0002,
+      "loss": 0.4226,
+      "step": 73
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.4411,
+      "step": 74
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.0002,
+      "loss": 0.4551,
+      "step": 75
     }
   ],
   "logging_steps": 1,
   "max_steps": 250,
   "num_train_epochs": 1,
   "save_steps": 25,
-  "total_flos":
+  "total_flos": 6048210105630720.0,
   "trial_name": null,
   "trial_params": null
 }
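trainer_state.json records the run's progress at this checkpoint: global_step 75 at epoch 0.3, a constant logged learning rate of 0.0002, logging every step, saving every 25 steps, and a cap of 250 steps. A hedged sketch of resuming from this checkpoint with the Hugging Face Trainer; the base model id and the dataset are placeholders, and only the training arguments mirror the values above:

```python
# Hedged sketch: resume training from "last-checkpoint".
# "base-model-id" and train_dataset are placeholders not recorded in this diff;
# the TrainingArguments values mirror trainer_state.json above.
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("base-model-id")
train_dataset = ...  # placeholder: the original training dataset

args = TrainingArguments(
    output_dir="outputs",
    max_steps=250,        # "max_steps": 250
    num_train_epochs=1,   # "num_train_epochs": 1
    logging_steps=1,      # "logging_steps": 1
    save_steps=25,        # "save_steps": 25
    learning_rate=2e-4,   # logged "learning_rate": 0.0002
)

trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
trainer.train(resume_from_checkpoint="last-checkpoint")  # continues from global_step 75
```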